diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 51250bd038..d2e2950654 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -64,6 +64,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard (optional). # Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 + uses: github/codeql-action/upload-sarif@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 with: sarif_file: results.sarif diff --git a/.github/workflows/test-build-deploy.yml b/.github/workflows/test-build-deploy.yml index 082ca9620e..6b3be24bc8 100644 --- a/.github/workflows/test-build-deploy.yml +++ b/.github/workflows/test-build-deploy.yml @@ -17,7 +17,7 @@ jobs: lint: runs-on: ubuntu-24.04 container: - image: quay.io/cortexproject/build-image:master-7ce1d1b12 + image: quay.io/cortexproject/build-image:master-59491e9aae steps: - name: Checkout Repo uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -42,11 +42,13 @@ jobs: run: make BUILD_IN_CONTAINER=false check-doc - name: Check White Noise. run: make BUILD_IN_CONTAINER=false check-white-noise + - name: Check Modernize + run: make BUILD_IN_CONTAINER=false check-modernize test: runs-on: ubuntu-24.04 container: - image: quay.io/cortexproject/build-image:master-7ce1d1b12 + image: quay.io/cortexproject/build-image:master-59491e9aae steps: - name: Checkout Repo uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -64,7 +66,7 @@ jobs: test-no-race: runs-on: ubuntu-24.04 container: - image: quay.io/cortexproject/build-image:master-7ce1d1b12 + image: quay.io/cortexproject/build-image:master-59491e9aae steps: - name: Checkout Repo uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -93,21 +95,21 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 + uses: github/codeql-action/init@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 with: languages: go - name: Autobuild - uses: github/codeql-action/autobuild@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 + uses: github/codeql-action/autobuild@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 + uses: github/codeql-action/analyze@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 build: runs-on: ubuntu-24.04 container: - image: quay.io/cortexproject/build-image:master-7ce1d1b12 + image: quay.io/cortexproject/build-image:master-59491e9aae steps: - name: Checkout Repo uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -162,6 +164,7 @@ jobs: - integration_querier - integration_ruler - integration_query_fuzz + - integration_remote_write_v2 steps: - name: Upgrade golang uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 @@ -210,8 +213,7 @@ jobs: docker pull quay.io/cortexproject/cortex:v1.18.1 elif [ "$TEST_TAGS" = "integration_query_fuzz" ]; then docker pull quay.io/cortexproject/cortex:v1.18.1 - docker pull quay.io/prometheus/prometheus:v2.51.0 - docker pull quay.io/prometheus/prometheus:v2.55.1 + docker pull quay.io/prometheus/prometheus:v3.5.0 fi docker pull memcached:1.6.1 docker pull redis:7.0.4-alpine @@ -224,7 +226,7 @@ jobs: export CORTEX_IMAGE="${CORTEX_IMAGE_PREFIX}cortex:$IMAGE_TAG-amd64" export CORTEX_CHECKOUT_DIR="/go/src/github.com/cortexproject/cortex" echo "Running integration tests with image: $CORTEX_IMAGE" - go test -tags=integration,${{ matrix.tags }} -timeout 2400s -v -count=1 ./integration/... + go test -tags=slicelabels,integration,${{ matrix.tags }} -timeout 2400s -v -count=1 ./integration/... 
env: IMAGE_PREFIX: ${{ secrets.IMAGE_PREFIX }} @@ -247,14 +249,14 @@ jobs: run: | touch build-image/.uptodate MIGRATIONS_DIR=$(pwd)/cmd/cortex/migrations - make BUILD_IMAGE=quay.io/cortexproject/build-image:master-7ce1d1b12 TTY='' configs-integration-test + make BUILD_IMAGE=quay.io/cortexproject/build-image:master-59491e9aae TTY='' configs-integration-test deploy_website: needs: [build, test] if: (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/tags/')) && github.repository == 'cortexproject/cortex' runs-on: ubuntu-24.04 container: - image: quay.io/cortexproject/build-image:master-7ce1d1b12 + image: quay.io/cortexproject/build-image:master-59491e9aae steps: - name: Checkout Repo uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -296,7 +298,7 @@ jobs: if: (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/tags/')) && github.repository == 'cortexproject/cortex' runs-on: ubuntu-24.04 container: - image: quay.io/cortexproject/build-image:master-7ce1d1b12 + image: quay.io/cortexproject/build-image:master-59491e9aae steps: - name: Checkout Repo uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 diff --git a/.golangci.yml b/.golangci.yml index 2812394d35..e566cfa72d 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -12,6 +12,8 @@ run: - integration_querier - integration_ruler - integration_query_fuzz + - integration_remote_write_v2 + - slicelabels output: formats: text: diff --git a/ADOPTERS.md b/ADOPTERS.md index def54436f4..12d76a1eb4 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -6,6 +6,7 @@ This is the list of organisations that are using Cortex in **production environm * [Amazon Web Services (AWS)](https://aws.amazon.com/prometheus) * [Aspen Mesh](https://aspenmesh.io/) * [Buoyant](https://buoyant.io/) +* [Cabify](https://tech.cabify.com/) * [DigitalOcean](https://www.digitalocean.com/) * [Electronic Arts](https://www.ea.com/) * [Etsy](https://www.etsy.com/) @@ -14,9 +15,12 @@ This is the list of organisations that are using Cortex in **production environm * [KakaoEnterprise](https://kakaocloud.com/) * [MayaData](https://mayadata.io/) * [Northflank](https://northflank.com/) +* [Open-Xchange](https://www.open-xchange.com/) * [Opstrace](https://opstrace.com/) * [PITS Globale Datenrettungsdienste](https://www.pitsdatenrettung.de/) * [Planetary Quantum](https://www.planetary-quantum.com) * [Platform9](https://platform9.com/) * [REWE Digital](https://rewe-digital.com/) +* [Swiggy](https://www.swiggy.in/) * [SysEleven](https://www.syseleven.de/) +* [Twilio](https://www.twilio.com/) diff --git a/CHANGELOG.md b/CHANGELOG.md index 77e9869a0d..9880c3c6d2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,14 +1,16 @@ # Changelog ## master / unreleased -* [FEATURE] Query Frontend: Add support /api/v1/format_query API for formatting queries. #6893 * [CHANGE] StoreGateway/Alertmanager: Add default 5s connection timeout on client. #6603 * [CHANGE] Ingester: Remove EnableNativeHistograms config flag and instead gate keep through new per-tenant limit at ingestion. #6718 * [CHANGE] Validate a tenantID when to use a single tenant resolver. #6727 +* [FEATURE] Distributor: Add an experimental `-distributor.otlp.enable-type-and-unit-labels` flag to add `__type__` and `__unit__` labels for OTLP metrics. #6969 +* [FEATURE] Distributor: Add an experimental `-distributor.otlp.allow-delta-temporality` flag to ingest delta-temporality OTLP metrics. #6934 * [FEATURE] Query Frontend: Add dynamic interval size for query splitting.
This is enabled by configuring experimental flags `querier.max-shards-per-query` and/or `querier.max-fetched-data-duration-per-query`. The split interval size is dynamically increased to maintain a number of shards and total duration fetched below the configured values. #6458 * [FEATURE] Querier/Ruler: Add `query_partial_data` and `rules_partial_data` limits to allow queries/rules to be evaluated with data from a single zone, if other zones are not available. #6526 * [FEATURE] Update prometheus alertmanager version to v0.28.0 and add new integration msteamsv2, jira, and rocketchat. #6590 * [FEATURE] Ingester/StoreGateway: Add `ResourceMonitor` module in Cortex, and add `ResourceBasedLimiter` in Ingesters and StoreGateways. #6674 +* [FEATURE] Support Prometheus remote write 2.0. #6330 * [FEATURE] Ingester: Support out-of-order native histogram ingestion. It automatically enabled when `-ingester.out-of-order-time-window > 0` and `-blocks-storage.tsdb.enable-native-histograms=true`. #6626 #6663 * [FEATURE] Ruler: Add support for percentage based sharding for rulers. #6680 * [FEATURE] Ruler: Add support for group labels. #6665 @@ -20,6 +22,14 @@ * [FEATURE] Compactor: Add support for percentage based sharding for compactors. #6738 * [FEATURE] Querier: Allow choosing PromQL engine via header. #6777 * [FEATURE] Querier: Support for configuring query optimizers and enabling XFunctions in the Thanos engine. #6873 +* [FEATURE] Query Frontend: Add support for the /api/v1/format_query API for formatting queries. #6893 +* [FEATURE] Query Frontend: Add support for the /api/v1/parse_query API (experimental) to parse a PromQL expression and return it as a JSON-formatted AST (abstract syntax tree). #6978 +* [ENHANCEMENT] Parquet Storage: Add support for additional sort columns during Parquet file generation. #7003 +* [ENHANCEMENT] Modernize the entire codebase using the Go modernize tool. #7005 +* [ENHANCEMENT] Overrides Exporter: Expose all fields that can be converted to float64. Also, the label value `max_local_series_per_metric` got renamed to `max_series_per_metric`, and `max_local_series_per_user` got renamed to `max_series_per_user`. #6979 +* [ENHANCEMENT] Ingester: Add `cortex_ingester_tsdb_wal_replay_unknown_refs_total` and `cortex_ingester_tsdb_wbl_replay_unknown_refs_total` metrics to track unknown series references during wal/wbl replaying. #6945 +* [ENHANCEMENT] Ruler: Emit an error message when the rule synchronization fails. #6902 +* [ENHANCEMENT] Querier: Support snappy and zstd response compression for `-querier.response-compression` flag. #6848 * [ENHANCEMENT] Tenant Federation: Add a # of query result limit logic when the `-tenant-federation.regex-matcher-enabled` is enabled. #6845 * [ENHANCEMENT] Query Frontend: Add a `cortex_slow_queries_total` metric to track # of slow queries per user. #6859 * [ENHANCEMENT] Query Frontend: Change to return 400 when the tenant resolving fail. #6715 @@ -44,7 +54,7 @@ * [ENHANCEMENT] Distributor: Add min/max schema validation for Native Histogram. #6766 * [ENHANCEMENT] Ingester: Handle runtime errors in query path #6769 * [ENHANCEMENT] Compactor: Support metadata caching bucket for Cleaner. Can be enabled via `-compactor.cleaner-caching-bucket-enabled` flag. #6778 -* [ENHANCEMENT] Distributor: Add ingestion rate limit for Native Histogram. #6794 +* [ENHANCEMENT] Distributor: Add ingestion rate limit for Native Histogram. #6794 #6994 * [ENHANCEMENT] Ingester: Add active series limit specifically for Native Histogram.
#6796 * [ENHANCEMENT] Compactor, Store Gateway: Introduce user scanner strategy and user index. #6780 * [ENHANCEMENT] Querier: Support chunks cache for parquet queryable. #6805 @@ -60,6 +70,15 @@ * [ENHANCEMENT] Querier: Support query limits in parquet queryable. #6870 * [ENHANCEMENT] Ring: Add zone label to ring_members metric. #6900 * [ENHANCEMENT] Ingester: Add new metric `cortex_ingester_push_errors_total` to track reasons for ingester request failures. #6901 +* [ENHANCEMENT] Ring: Expose `detailed_metrics_enabled` for all rings. Default true. #6926 +* [ENHANCEMENT] Parquet Storage: Allow Parquet Queryable to disable fallback to Store Gateway. #6920 +* [ENHANCEMENT] Query Frontend: Add `format_query` and `parse_query` values to the `op` label of the `cortex_query_frontend_queries_total` metric. #6925 #6990 +* [ENHANCEMENT] API: Add request ID injection to context to enable tracking requests across downstream services. #6895 +* [ENHANCEMENT] gRPC: Add gRPC Channelz monitoring. #6950 +* [ENHANCEMENT] Upgrade build image and Go version to 1.24.6. #6970 #6976 +* [ENHANCEMENT] Implement versioned transactions for writes to the DynamoDB ring. #6986 +* [ENHANCEMENT] Add source metadata to requests (API vs ruler). #6947 +* [ENHANCEMENT] Add new metrics `cortex_discarded_series` and `cortex_discarded_series_per_labelset` to track the number of series that have a discarded sample. #6995 * [BUGFIX] Ingester: Avoid error or early throttling when READONLY ingesters are present in the ring #6517 * [BUGFIX] Ingester: Fix labelset data race condition. #6573 * [BUGFIX] Compactor: Cleaner should not put deletion marker for blocks with no-compact marker. #6576 @@ -79,6 +98,8 @@ * [BUGFIX] Compactor: Delete the prefix `blocks_meta` from the metadata fetcher metrics. #6832 * [BUGFIX] Store Gateway: Avoid race condition by deduplicating entries in bucket stores user scan. #6863 * [BUGFIX] Runtime-config: Change to check tenant limit validation when loading runtime config only for `all`, `distributor`, `querier`, and `ruler` targets. #6880 +* [BUGFIX] Frontend: Fix remote read snappy input due to request string logging when query stats are enabled. #7025 +* [BUGFIX] Distributor: Fix the `/distributor/all_user_stats` API to work during rolling updates on ingesters. #7026 ## 1.19.0 2025-02-27 diff --git a/GOVERNANCE.md b/GOVERNANCE.md index 3420219f4c..6465b94cf7 100644 --- a/GOVERNANCE.md +++ b/GOVERNANCE.md @@ -1,6 +1,7 @@ # Cortex Governance +This document defines project governance for the Cortex project. Its purpose is to describe how decisions are made on the project and how anyone can influence these decisions. -This document defines project governance for the project. +This governance charter applies to every project under the cortexproject GitHub organization. The term "Cortex project" refers to any work done under the cortexproject GitHub organization and includes the cortexproject/cortex repository itself as well as cortexproject/cortex-tools, cortexproject/cortex-jsonnet and all the other repositories under the cortexproject GitHub organization.
## Voting diff --git a/Makefile b/Makefile index 705e005dac..18a7b16597 100644 --- a/Makefile +++ b/Makefile @@ -87,15 +87,12 @@ $(foreach exe, $(EXES), $(eval $(call dep_exe, $(exe)))) pkg/cortexpb/cortex.pb.go: pkg/cortexpb/cortex.proto pkg/ingester/client/ingester.pb.go: pkg/ingester/client/ingester.proto pkg/distributor/distributorpb/distributor.pb.go: pkg/distributor/distributorpb/distributor.proto -pkg/ingester/wal.pb.go: pkg/ingester/wal.proto pkg/ring/ring.pb.go: pkg/ring/ring.proto pkg/frontend/v1/frontendv1pb/frontend.pb.go: pkg/frontend/v1/frontendv1pb/frontend.proto pkg/frontend/v2/frontendv2pb/frontend.pb.go: pkg/frontend/v2/frontendv2pb/frontend.proto pkg/querier/tripperware/queryrange/queryrange.pb.go: pkg/querier/tripperware/queryrange/queryrange.proto -pkg/querier/tripperware/instantquery/instantquery.pb.go: pkg/querier/tripperware/instantquery/instantquery.proto pkg/querier/tripperware/query.pb.go: pkg/querier/tripperware/query.proto pkg/querier/stats/stats.pb.go: pkg/querier/stats/stats.proto -pkg/distributor/ha_tracker.pb.go: pkg/distributor/ha_tracker.proto pkg/ruler/rulespb/rules.pb.go: pkg/ruler/rulespb/rules.proto pkg/ruler/ruler.pb.go: pkg/ruler/ruler.proto pkg/ring/kv/memberlist/kv.pb.go: pkg/ring/kv/memberlist/kv.proto @@ -115,13 +112,13 @@ build-image/$(UPTODATE): build-image/* SUDO := $(shell docker info >/dev/null 2>&1 || echo "sudo -E") BUILD_IN_CONTAINER := true BUILD_IMAGE ?= $(IMAGE_PREFIX)build-image -LATEST_BUILD_IMAGE_TAG ?= master-7ce1d1b12 +LATEST_BUILD_IMAGE_TAG ?= master-59491e9aae # TTY is parameterized to allow Google Cloud Builder to run builds, # as it currently disallows TTY devices. This value needs to be overridden # in any custom cloudbuild.yaml files TTY := --tty -GO_FLAGS := -ldflags "-X main.Branch=$(GIT_BRANCH) -X main.Revision=$(GIT_REVISION) -X main.Version=$(VERSION) -extldflags \"-static\" -s -w" -tags netgo +GO_FLAGS := -ldflags "-X main.Branch=$(GIT_BRANCH) -X main.Revision=$(GIT_REVISION) -X main.Version=$(VERSION) -extldflags \"-static\" -s -w" -tags "netgo slicelabels" ifeq ($(BUILD_IN_CONTAINER),true) @@ -129,7 +126,7 @@ GOVOLUMES= -v $(shell pwd)/.cache:/go/cache:delegated,z \ -v $(shell pwd)/.pkg:/go/pkg:delegated,z \ -v $(shell pwd):/go/src/github.com/cortexproject/cortex:delegated,z -exes $(EXES) protos $(PROTO_GOS) lint test cover shell mod-check check-protos web-build web-pre web-deploy doc: build-image/$(UPTODATE) +exes $(EXES) protos $(PROTO_GOS) lint test cover shell mod-check check-protos web-build web-pre web-deploy doc modernize: build-image/$(UPTODATE) @mkdir -p $(shell pwd)/.pkg @mkdir -p $(shell pwd)/.cache @echo @@ -177,7 +174,7 @@ lint: golangci-lint run # Ensure no blocklisted package is imported. - GOFLAGS="-tags=requires_docker,integration,integration_alertmanager,integration_backward_compatibility,integration_memberlist,integration_querier,integration_ruler,integration_query_fuzz" faillint -paths "github.com/bmizerany/assert=github.com/stretchr/testify/assert,\ + GOFLAGS="-tags=requires_docker,integration,integration_alertmanager,integration_backward_compatibility,integration_memberlist,integration_querier,integration_ruler,integration_query_fuzz,integration_remote_write_v2" faillint -paths "github.com/bmizerany/assert=github.com/stretchr/testify/assert,\ golang.org/x/net/context=context,\ sync/atomic=go.uber.org/atomic,\ github.com/prometheus/client_golang/prometheus.{MultiError}=github.com/prometheus/prometheus/tsdb/errors.{NewMulti},\ @@ -216,15 +213,15 @@ lint: ./pkg/ruler/... 
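The `slicelabels` build tag wired into `GO_FLAGS` above (and into the test and doc targets below) works through Go's file-level build constraints: a tag passed to the `go` command selects which files get compiled. A minimal sketch of the mechanism, with hypothetical file and package names (Prometheus is assumed to use this tag to select its slice-backed labels implementation):

```go
//go:build slicelabels

// labels_slice.go (hypothetical): compiled only when the build runs with
// the slicelabels tag, e.g. `go build -tags "netgo slicelabels" ./...`.
// A sibling file guarded by //go:build !slicelabels would provide the
// alternative implementation of the same API.
package labels

// Label is a single name/value pair; Labels is the slice-backed
// representation this tag is assumed to select, in place of a
// packed-string representation.
type Label struct{ Name, Value string }

type Labels []Label
```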
test: - go test -tags netgo -timeout 30m -race -count 1 ./... + go test -tags "netgo slicelabels" -timeout 30m -race -count 1 ./... test-no-race: - go test -tags netgo -timeout 30m -count 1 ./... + go test -tags "netgo slicelabels" -timeout 30m -count 1 ./... cover: $(eval COVERDIR := $(shell mktemp -d coverage.XXXXXXXXXX)) $(eval COVERFILE := $(shell mktemp $(COVERDIR)/unit.XXXXXXXXXX)) - go test -tags netgo -timeout 30m -race -count 1 -coverprofile=$(COVERFILE) ./... + go test -tags netgo,slicelabels -timeout 30m -race -count 1 -coverprofile=$(COVERFILE) ./... go tool cover -html=$(COVERFILE) -o cover.html go tool cover -func=cover.html | tail -n1 @@ -232,7 +229,7 @@ shell: bash configs-integration-test: - /bin/bash -c "go test -v -tags 'netgo integration' -timeout 10m ./pkg/configs/... ./pkg/ruler/..." + /bin/bash -c "go test -v -tags 'netgo integration slicelabels' -timeout 10m ./pkg/configs/... ./pkg/ruler/..." mod-check: GO111MODULE=on go mod download @@ -254,15 +251,19 @@ web-build: web-pre web-deploy: ./tools/website/web-deploy.sh +modernize: + go run golang.org/x/tools/gopls/internal/analysis/modernize/cmd/modernize@v0.20.0 -fix ./... + # Generates the config file documentation. doc: clean-doc - go run ./tools/doc-generator ./docs/configuration/config-file-reference.template > ./docs/configuration/config-file-reference.md - go run ./tools/doc-generator ./docs/blocks-storage/compactor.template > ./docs/blocks-storage/compactor.md - go run ./tools/doc-generator ./docs/blocks-storage/store-gateway.template > ./docs/blocks-storage/store-gateway.md - go run ./tools/doc-generator ./docs/blocks-storage/querier.template > ./docs/blocks-storage/querier.md - go run ./tools/doc-generator ./docs/guides/encryption-at-rest.template > ./docs/guides/encryption-at-rest.md + go run -tags slicelabels ./tools/doc-generator ./docs/configuration/config-file-reference.template > ./docs/configuration/config-file-reference.md + go run -tags slicelabels ./tools/doc-generator ./docs/blocks-storage/compactor.template > ./docs/blocks-storage/compactor.md + go run -tags slicelabels ./tools/doc-generator ./docs/blocks-storage/store-gateway.template > ./docs/blocks-storage/store-gateway.md + go run -tags slicelabels ./tools/doc-generator ./docs/blocks-storage/querier.template > ./docs/blocks-storage/querier.md + go run -tags slicelabels ./tools/doc-generator ./docs/guides/encryption-at-rest.template > ./docs/guides/encryption-at-rest.md embedmd -w docs/operations/requests-mirroring-to-secondary-cluster.md embedmd -w docs/guides/overrides-exporter.md + go run -tags slicelabels ./tools/doc-generator -json-schema > ./schemas/cortex-config-schema.json endif @@ -311,6 +312,9 @@ clean-white-noise: check-white-noise: clean-white-noise @git diff --exit-code --quiet -- '*.md' || (echo "Please remove trailing whitespaces running 'make clean-white-noise'" && false) +check-modernize: modernize + @git diff --exit-code -- . 
|| (echo "Please modernize running 'make modernize'" && false) + web-serve: cd website && hugo --config config.toml --minify -v server diff --git a/README.md b/README.md index 515b199a29..470ffe3ed5 100644 --- a/README.md +++ b/README.md @@ -11,14 +11,14 @@ # Cortex -Cortex is a horizontally scalable, highly available, multi-tenant, long term storage solution for [Prometheus](https://prometheus.io) and [OpenTelemetry Metrics](https://opentelemetry.io/docs/specs/otel/metrics/) +Cortex is a horizontally scalable, highly available, multi-tenant, long-term storage solution for [Prometheus](https://prometheus.io) and [OpenTelemetry Metrics](https://opentelemetry.io/docs/specs/otel/metrics/). ## Features - **Horizontally scalable:** Cortex can run across multiple machines in a cluster, exceeding the throughput and storage of a single machine. - **Highly available:** When run in a cluster, Cortex can replicate data between machines. - **Multi-tenant:** Cortex can isolate data and queries from multiple different independent Prometheus sources in a single cluster. -- **Long term storage:** Cortex supports S3, GCS, Swift and Microsoft Azure for long term storage of metric data. +- **Long-term storage:** Cortex supports S3, GCS, Swift and Microsoft Azure for long-term storage of metric data. ## Documentation @@ -76,13 +76,13 @@ Join us in shaping the future of Cortex, and let's build something amazing toget - Sep 2020 KubeCon talk "Scaling Prometheus: How We Got Some Thanos Into Cortex" ([video](https://www.youtube.com/watch?v=Z5OJzRogAS4), [slides](https://static.sched.com/hosted_files/kccnceu20/ec/2020-08%20-%20KubeCon%20EU%20-%20Cortex%20blocks%20storage.pdf)) - Jul 2020 PromCon talk "Sharing is Caring: Leveraging Open Source to Improve Cortex & Thanos" ([video](https://www.youtube.com/watch?v=2oTLouUvsac), [slides](https://docs.google.com/presentation/d/1OuKYD7-k9Grb7unppYycdmVGWN0Bo0UwdJRySOoPdpg/edit)) - Nov 2019 KubeCon talks "[Cortex 101: Horizontally Scalable Long Term Storage for Prometheus][kubecon-cortex-101]" ([video][kubecon-cortex-101-video], [slides][kubecon-cortex-101-slides]), "[Configuring Cortex for Max - Performance][kubecon-cortex-201]" ([video][kubecon-cortex-201-video], [slides][kubecon-cortex-201-slides], [write up][kubecon-cortex-201-writeup]) and "[Blazin’ Fast PromQL][kubecon-blazin]" ([slides][kubecon-blazin-slides], [video][kubecon-blazin-video], [write up][kubecon-blazin-writeup]) + Performance][kubecon-cortex-201]" ([video][kubecon-cortex-201-video], [slides][kubecon-cortex-201-slides], [write up][kubecon-cortex-201-writeup]) and "[Blazin' Fast PromQL][kubecon-blazin]" ([slides][kubecon-blazin-slides], [video][kubecon-blazin-video], [write up][kubecon-blazin-writeup]) - Nov 2019 PromCon talk "[Two Households, Both Alike in Dignity: Cortex and Thanos][promcon-two-households]" ([video][promcon-two-households-video], [slides][promcon-two-households-slides], [write up][promcon-two-households-writeup]) - May 2019 KubeCon talks; "[Cortex: Intro][kubecon-cortex-intro]" ([video][kubecon-cortex-intro-video], [slides][kubecon-cortex-intro-slides], [blog post][kubecon-cortex-intro-blog]) and "[Cortex: Deep Dive][kubecon-cortex-deepdive]" ([video][kubecon-cortex-deepdive-video], [slides][kubecon-cortex-deepdive-slides]) - Nov 2018 CloudNative London meetup talk; "Cortex: Horizontally Scalable, Highly Available Prometheus" ([slides][cloudnative-london-2018-slides]) - Aug 2018 PromCon panel; "[Prometheus Long-Term Storage Approaches][promcon-2018-panel]" 
([video][promcon-2018-video]) - Dec 2018 KubeCon talk; "[Cortex: Infinitely Scalable Prometheus][kubecon-2018-talk]" ([video][kubecon-2018-video], [slides][kubecon-2018-slides]) -- Aug 2017 PromCon talk; "[Cortex: Prometheus as a Service, One Year On][promcon-2017-talk]" ([videos][promcon-2017-video], [slides][promcon-2017-slides], write up [part 1][promcon-2017-writeup-1], [part 2][promcon-2017-writeup-2], [part 3][promcon-2017-writeup-3]) +- Aug 2017 PromCon talk; "[Cortex: Prometheus as a Service, One Year On][promcon-2017-talk]" ([video][promcon-2017-video], [slides][promcon-2017-slides], write up [part 1][promcon-2017-writeup-1], [part 2][promcon-2017-writeup-2], [part 3][promcon-2017-writeup-3]) - Jun 2017 Prometheus London meetup talk; "Cortex: open-source, horizontally-scalable, distributed Prometheus" ([video][prometheus-london-2017-video]) - Dec 2016 KubeCon talk; "Weave Cortex: Multi-tenant, horizontally scalable Prometheus as a Service" ([video][kubecon-2016-video], [slides][kubecon-2016-slides]) - Aug 2016 PromCon talk; "Project Frankenstein: Multitenant, Scale-Out Prometheus": ([video][promcon-2016-video], [slides][promcon-2016-slides]) @@ -90,10 +90,10 @@ Join us in shaping the future of Cortex, and let's build something amazing toget ### Blog Posts - Dec 2020 blog post "[How AWS and Grafana Labs are scaling Cortex for the cloud](https://aws.amazon.com/blogs/opensource/how-aws-and-grafana-labs-are-scaling-cortex-for-the-cloud/)" -- Oct 2020 blog post "[How to switch Cortex from chunks to blocks storage (and why you won’t look back)](https://grafana.com/blog/2020/10/19/how-to-switch-cortex-from-chunks-to-blocks-storage-and-why-you-wont-look-back/)" +- Oct 2020 blog post "[How to switch Cortex from chunks to blocks storage (and why you won't look back)](https://grafana.com/blog/2020/10/19/how-to-switch-cortex-from-chunks-to-blocks-storage-and-why-you-wont-look-back/)" - Oct 2020 blog post "[Now GA: Cortex blocks storage for running Prometheus at scale with reduced operational complexity](https://grafana.com/blog/2020/10/06/now-ga-cortex-blocks-storage-for-running-prometheus-at-scale-with-reduced-operational-complexity/)" - Sep 2020 blog post "[A Tale of Tail Latencies](https://www.weave.works/blog/a-tale-of-tail-latencies)" -- Aug 2020 blog post "[Scaling Prometheus: How we’re pushing Cortex blocks storage to its limit and beyond](https://grafana.com/blog/2020/08/12/scaling-prometheus-how-were-pushing-cortex-blocks-storage-to-its-limit-and-beyond/)" +- Aug 2020 blog post "[Scaling Prometheus: How we're pushing Cortex blocks storage to its limit and beyond](https://grafana.com/blog/2020/08/12/scaling-prometheus-how-were-pushing-cortex-blocks-storage-to-its-limit-and-beyond/)" - Jul 2020 blog post "[How blocks storage in Cortex reduces operational complexity for running Prometheus at massive scale](https://grafana.com/blog/2020/07/29/how-blocks-storage-in-cortex-reduces-operational-complexity-for-running-prometheus-at-massive-scale/)" - Mar 2020 blog post "[Cortex: Zone Aware Replication](https://kenhaines.net/cortex-zone-aware-replication/)" - Mar 2020 blog post "[How we're using gossip to improve Cortex and Loki availability](https://grafana.com/blog/2020/03/25/how-were-using-gossip-to-improve-cortex-and-loki-availability/)" @@ -157,7 +157,7 @@ Join us in shaping the future of Cortex, and let's build something amazing toget ### Amazon Managed Service for Prometheus (AMP) -[Amazon Managed Service for Prometheus (AMP)](https://aws.amazon.com/prometheus/) is a 
Prometheus-compatible monitoring service that makes it easy to monitor containerized applications at scale. It is a highly available, secure, and managed monitoring for your containers. Get started [here](https://console.aws.amazon.com/prometheus/home). To learn more about the AMP, reference our [documentation](https://docs.aws.amazon.com/prometheus/latest/userguide/what-is-Amazon-Managed-Service-Prometheus.html) and [Getting Started with AMP blog](https://aws.amazon.com/blogs/mt/getting-started-amazon-managed-service-for-prometheus/). +[Amazon Managed Service for Prometheus (AMP)](https://aws.amazon.com/prometheus/) is a Prometheus-compatible monitoring service that makes it easy to monitor containerized applications at scale. It is a highly available, secure, and managed monitoring service for your containers. Get started [here](https://console.aws.amazon.com/prometheus/home). To learn more about AMP, reference our [documentation](https://docs.aws.amazon.com/prometheus/latest/userguide/what-is-Amazon-Managed-Service-Prometheus.html) and [Getting Started with AMP blog](https://aws.amazon.com/blogs/mt/getting-started-amazon-managed-service-for-prometheus/). ## Emeritus Maintainers diff --git a/build-image/Dockerfile b/build-image/Dockerfile index 4952d308fb..2aa5ae80cc 100644 --- a/build-image/Dockerfile +++ b/build-image/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.24.3-bullseye +FROM golang:1.24.6-bullseye ARG goproxyValue ENV GOPROXY=${goproxyValue} RUN apt-get update && apt-get install -y curl file gettext jq unzip protobuf-compiler libprotobuf-dev && \ diff --git a/cmd/cortex/main_test.go b/cmd/cortex/main_test.go index 5c71949129..2a5d01a61a 100644 --- a/cmd/cortex/main_test.go +++ b/cmd/cortex/main_test.go @@ -225,7 +225,6 @@ func TestExpandEnv(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.in, func(t *testing.T) { _ = os.Setenv("y", "y") output := expandEnv([]byte(test.in)) @@ -263,7 +262,6 @@ func TestParseConfigFileParameter(t *testing.T) { {"--config.expand-env --opt1 --config.file=foo", "foo", true}, } for _, test := range tests { - test := test t.Run(test.args, func(t *testing.T) { args := strings.Split(test.args, " ") configFile, expandENV := parseConfigFileParameter(args) diff --git a/cmd/thanosconvert/main.go b/cmd/thanosconvert/main.go index bec41a0027..24d2dbc4b6 100644 --- a/cmd/thanosconvert/main.go +++ b/cmd/thanosconvert/main.go @@ -79,7 +79,7 @@ func main() { } -func fatal(msg string, args ...interface{}) { +func fatal(msg string, args ...any) { fmt.Fprintf(os.Stderr, msg+"\n", args...) 
os.Exit(1) } diff --git a/docs/api/_index.md b/docs/api/_index.md index 64a6aab3f0..73462ec258 100644 --- a/docs/api/_index.md +++ b/docs/api/_index.md @@ -37,7 +37,8 @@ For the sake of clarity, in this document we have grouped API endpoints by servi | [Instant query](#instant-query) | Querier, Query-frontend || `GET,POST /api/v1/query` | | [Range query](#range-query) | Querier, Query-frontend || `GET,POST /api/v1/query_range` | | [Exemplar query](#exemplar-query) | Querier, Query-frontend || `GET,POST /api/v1/query_exemplars` | -| [Format query](#format-query) | Querier, Query-frontend || `GET,POST /api/v1/format-query` | +| [Format query](#format-query) | Querier, Query-frontend || `GET,POST /api/v1/format_query` | +| [Parse query](#parse-query) | Querier, Query-frontend || `GET,POST /api/v1/parse_query` | | [Get series by label matchers](#get-series-by-label-matchers) | Querier, Query-frontend || `GET,POST /api/v1/series` | | [Get label names](#get-label-names) | Querier, Query-frontend || `GET,POST /api/v1/labels` | | [Get label values](#get-label-values) | Querier, Query-frontend || `GET /api/v1/label/{name}/values` | @@ -384,6 +385,21 @@ _For more information, please check out the Prometheus [fomatting query expressi _Requires [authentication](#authentication)._ +### Parse query + +``` +GET,POST /api/v1/parse_query + +# Legacy +GET,POST <legacy-http-prefix>/api/v1/parse_query +``` + +Prometheus-compatible parse query endpoint. This endpoint is **experimental**; it parses a PromQL expression and returns it as a JSON-formatted AST (abstract syntax tree) representation. + +_For more information, please check out the Prometheus [Parsing query expressions](https://prometheus.io/docs/prometheus/latest/querying/api/#parsing-a-promql-expressions-into-a-abstract-syntax-tree-ast) documentation._ + +_Requires [authentication](#authentication)._ + ### Get series by label matchers ``` diff --git a/docs/architecture.md b/docs/architecture.md index bbb2ed7ae0..b532d83239 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -21,9 +21,9 @@ Incoming samples (writes from Prometheus) are handled by the [distributor](#dist ## Blocks storage -The blocks storage is based on [Prometheus TSDB](https://prometheus.io/docs/prometheus/latest/storage/): it stores each tenant's time series into their own TSDB which write out their series to a on-disk Block (defaults to 2h block range periods). Each Block is composed by a few files storing the chunks and the block index. +The blocks storage is based on [Prometheus TSDB](https://prometheus.io/docs/prometheus/latest/storage/): it stores each tenant's time series into their own TSDB which writes out their series to an on-disk Block (defaults to 2h block range periods). Each Block is composed of a few files storing the chunks and the block index. -The TSDB chunk files contain the samples for multiple series. The series inside the Chunks are then indexed by a per-block index, which indexes metric names and labels to time series in the chunk files. +The TSDB chunk files contain the samples for multiple series. The series inside the chunks are then indexed by a per-block index, which indexes metric names and labels to time series in the chunk files. The blocks storage doesn't require a dedicated storage backend for the index.
The only requirement is an object store for the Block files, which can be: @@ -60,7 +60,7 @@ The **distributor** service is responsible for handling incoming samples from Pr The validation done by the distributor includes: -- The metric labels name are formally correct +- The metric label names are formally correct - The configured max number of labels per metric is respected - The configured max length of a label name and value is respected - The timestamp is not older/newer than the configured min/max time range @@ -80,7 +80,7 @@ The supported KV stores for the HA tracker are: * [Consul](https://www.consul.io) * [Etcd](https://etcd.io) -Note: Memberlist is not supported. Memberlist-based KV store propagates updates using gossip, which is very slow for HA purposes: result is that different distributors may see different Prometheus server as elected HA replica, which is definitely not desirable. +Note: Memberlist is not supported. Memberlist-based KV store propagates updates using gossip, which is very slow for HA purposes: the result is that different distributors may see different Prometheus servers as the elected HA replica, which is definitely not desirable. For more information, please refer to [config for sending HA pairs data to Cortex](guides/ha-pair-handling.md) in the documentation. @@ -97,11 +97,11 @@ The trade-off associated with the latter is that writes are more balanced across #### The hash ring -A hash ring (stored in a key-value store) is used to achieve consistent hashing for the series sharding and replication across the ingesters. All [ingesters](#ingester) register themselves into the hash ring with a set of tokens they own; each token is a random unsigned 32-bit number. Each incoming series is [hashed](#hashing) in the distributor and then pushed to the ingester owning the tokens range for the series hash number plus N-1 subsequent ingesters in the ring, where N is the replication factor. +A hash ring (stored in a key-value store) is used to achieve consistent hashing for the series sharding and replication across the ingesters. All [ingesters](#ingester) register themselves into the hash ring with a set of tokens they own; each token is a random unsigned 32-bit number. Each incoming series is [hashed](#hashing) in the distributor and then pushed to the ingester owning the token's range for the series hash number plus N-1 subsequent ingesters in the ring, where N is the replication factor. To do the hash lookup, distributors find the smallest appropriate token whose value is larger than the [hash of the series](#hashing). When the replication factor is larger than 1, the next subsequent tokens (clockwise in the ring) that belong to different ingesters will also be included in the result. -The effect of this hash set up is that each token that an ingester owns is responsible for a range of hashes. If there are three tokens with values 0, 25, and 50, then a hash of 3 would be given to the ingester that owns the token 25; the ingester owning token 25 is responsible for the hash range of 1-25. +The effect of this hash setup is that each token that an ingester owns is responsible for a range of hashes. If there are three tokens with values 0, 25, and 50, then a hash of 3 would be given to the ingester that owns token 25; the ingester owning token 25 is responsible for the hash range of 1-25. 
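The clockwise token lookup described above is compact enough to sketch. This is an illustrative toy, not the actual Cortex ring code (the `ring` type and `lookup` helper are hypothetical):

```go
package main

import (
	"fmt"
	"sort"
)

// ring maps each token to the ingester owning it; tokens are kept sorted.
type ring struct {
	tokens []uint32          // sorted ascending
	owner  map[uint32]string // token -> ingester ID
}

// lookup finds the smallest token larger than the series hash, then walks
// clockwise (wrapping around) collecting distinct ingesters until it has
// replicationFactor of them.
func (r *ring) lookup(hash uint32, replicationFactor int) []string {
	start := sort.Search(len(r.tokens), func(i int) bool { return r.tokens[i] > hash })
	seen := map[string]bool{}
	var out []string
	for i := 0; len(out) < replicationFactor && i < len(r.tokens); i++ {
		ing := r.owner[r.tokens[(start+i)%len(r.tokens)]] // modulo wraps the ring
		if !seen[ing] {
			seen[ing] = true
			out = append(out, ing)
		}
	}
	return out
}

func main() {
	r := &ring{
		tokens: []uint32{0, 25, 50},
		owner:  map[uint32]string{0: "ingester-1", 25: "ingester-2", 50: "ingester-3"},
	}
	// As in the example above: hash 3 lands on the ingester owning token 25,
	// and with a replication factor of 2 the next ingester clockwise is added.
	fmt.Println(r.lookup(3, 2)) // [ingester-2 ingester-3]
}
```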
The supported KV stores for the hash ring are: @@ -111,7 +111,7 @@ The supported KV stores for the hash ring are: #### Quorum consistency -Since all distributors share access to the same hash ring, write requests can be sent to any distributor and you can setup a stateless load balancer in front of it. +Since all distributors share access to the same hash ring, write requests can be sent to any distributor and you can set up a stateless load balancer in front of it. To ensure consistent query results, Cortex uses [Dynamo-style](https://www.allthingsdistributed.com/files/amazon-dynamo-sosp2007.pdf) quorum consistency on reads and writes. This means that the distributor will wait for a positive response of at least one half plus one of the ingesters to send the sample to before successfully responding to the Prometheus write request. @@ -125,35 +125,35 @@ The **ingester** service is responsible for writing incoming series to a [long-t Incoming series are not immediately written to the storage but kept in memory and periodically flushed to the storage (by default, 2 hours). For this reason, the [queriers](#querier) may need to fetch samples both from ingesters and long-term storage while executing a query on the read path. -Ingesters contain a **lifecycler** which manages the lifecycle of an ingester and stores the **ingester state** in the [hash ring](#the-hash-ring). Each ingester could be in one of the following states: +Ingesters contain a **lifecycler** which manages the lifecycle of an ingester and stores the **ingester state** in the [hash ring](#the-hash-ring). Each ingester can be in one of the following states: - **`PENDING`**<br />
- The ingester has just started. While in this state, the ingester doesn't receive neither write and read requests. + The ingester has just started. While in this state, the ingester doesn't receive either write or read requests. - **`JOINING`**<br />
- The ingester is starting up and joining the ring. While in this state the ingester doesn't receive neither write and read requests. The ingester will join the ring using tokens loaded from disk (if `-ingester.tokens-file-path` is configured) or generate a set of new random ones. Finally, the ingester optionally observes the ring for tokens conflicts and then, once any conflict is resolved, will move to `ACTIVE` state. + The ingester is starting up and joining the ring. While in this state the ingester doesn't receive either write or read requests. The ingester will join the ring using tokens loaded from disk (if `-ingester.tokens-file-path` is configured) or generate a set of new random ones. Finally, the ingester optionally observes the ring for token conflicts and then, once any conflict is resolved, will move to `ACTIVE` state. - **`ACTIVE`**<br />
The ingester is up and running. While in this state the ingester can receive both write and read requests. - **`LEAVING`**<br />
- The ingester is shutting down and leaving the ring. While in this state the ingester doesn't receive write requests, while it could receive read requests. + The ingester is shutting down and leaving the ring. While in this state the ingester doesn't receive write requests, while it can still receive read requests. - **`UNHEALTHY`**<br />
The ingester has failed to heartbeat to the ring's KV Store. While in this state, distributors skip the ingester while building the replication set for incoming series and the ingester does not receive write or read requests. Ingesters are **semi-stateful**. -#### Ingesters failure and data loss +#### Ingester failure and data loss If an ingester process crashes or exits abruptly, all the in-memory series that have not yet been flushed to the long-term storage will be lost. There are two main ways to mitigate this failure mode: 1. Replication 2. Write-ahead log (WAL) -The **replication** is used to hold multiple (typically 3) replicas of each time series in the ingesters. If the Cortex cluster loses an ingester, the in-memory series held by the lost ingester are also replicated to at least another ingester. In the event of a single ingester failure, no time series samples will be lost. However, in the event of multiple ingester failures, time series may be potentially lost if the failures affect all the ingesters holding the replicas of a specific time series. +The **replication** is used to hold multiple (typically 3) replicas of each time series in the ingesters. If the Cortex cluster loses an ingester, the in-memory series held by the lost ingester are also replicated to at least one other ingester. In the event of a single ingester failure, no time series samples will be lost. However, in the event of multiple ingester failures, time series may be potentially lost if the failures affect all the ingesters holding the replicas of a specific time series. The **write-ahead log** (WAL) is used to write to a persistent disk all incoming series samples until they're flushed to the long-term storage. In the event of an ingester failure, a subsequent process restart will replay the WAL and recover the in-memory series samples. -Contrary to the sole replication and given the persistent disk data is not lost, in the event of multiple ingesters failure each ingester will recover the in-memory series samples from WAL upon subsequent restart. The replication is still recommended in order to ensure no temporary failures on the read path in the event of a single ingester failure. +Contrary to the sole replication and given that the persistent disk data is not lost, in the event of multiple ingester failures each ingester will recover the in-memory series samples from WAL upon subsequent restart. The replication is still recommended in order to ensure no temporary failures on the read path in the event of a single ingester failure. -#### Ingesters write de-amplification +#### Ingester write de-amplification Ingesters store recently received samples in-memory in order to perform write de-amplification. If the ingesters would immediately write received samples to the long-term storage, the system would be very difficult to scale due to the very high pressure on the storage. For this reason, the ingesters batch and compress samples in-memory and periodically flush them out to the storage. @@ -169,10 +169,10 @@ Queriers are **stateless** and can be scaled up and down as needed. ### Compactor -The **compactor** is a service which is responsible to: +The **compactor** is a service which is responsible for: -- Compact multiple blocks of a given tenant into a single optimized larger block. This helps to reduce storage costs (deduplication, index size reduction), and increase query speed (querying fewer blocks is faster). -- Keep the per-tenant bucket index updated. 
The [bucket index](./blocks-storage/bucket-index.md) is used by [queriers](./blocks-storage/querier.md), [store-gateways](#store-gateway) and rulers to discover new blocks in the storage. +- Compacting multiple blocks of a given tenant into a single optimized larger block. This helps to reduce storage costs (deduplication, index size reduction), and increase query speed (querying fewer blocks is faster). +- Keeping the per-tenant bucket index updated. The [bucket index](./blocks-storage/bucket-index.md) is used by [queriers](./blocks-storage/querier.md), [store-gateways](#store-gateway) and rulers to discover new blocks in the storage. For more information, see the [compactor documentation](./blocks-storage/compactor.md). @@ -190,7 +190,7 @@ The store gateway is **semi-stateful**. ### Query frontend -The **query frontend** is an **optional service** providing the querier's API endpoints and can be used to accelerate the read path. When the query frontend is in place, incoming query requests should be directed to the query frontend instead of the queriers. The querier service will be still required within the cluster, in order to execute the actual queries. +The **query frontend** is an **optional service** providing the querier's API endpoints and can be used to accelerate the read path. When the query frontend is in place, incoming query requests should be directed to the query frontend instead of the queriers. The querier service will still be required within the cluster, in order to execute the actual queries. The query frontend internally performs some query adjustments and holds queries in an internal queue. In this setup, queriers act as workers which pull jobs from the queue, execute them, and return them to the query-frontend for aggregation. Queriers need to be configured with the query frontend address (via the `-querier.frontend-address` CLI flag) in order to allow them to connect to the query frontends. @@ -199,15 +199,15 @@ Query frontends are **stateless**. However, due to how the internal queue works, Flow of the query in the system when using query-frontend: 1) Query is received by query frontend, which can optionally split it or serve from the cache. -2) Query frontend stores the query into in-memory queue, where it waits for some querier to pick it up. +2) Query frontend stores the query into an in-memory queue, where it waits for some querier to pick it up. 3) Querier picks up the query, and executes it. 4) Querier sends result back to query-frontend, which then forwards it to the client. -Query frontend can also be used with any Prometheus-API compatible service. In this mode Cortex can be used as an query accelerator with it's caching and splitting features on other prometheus query engines like Thanos Querier or your own Prometheus server. Query frontend needs to be configured with downstream url address(via the `-frontend.downstream-url` CLI flag), which is the endpoint of the prometheus server intended to be connected with Cortex. +Query frontend can also be used with any Prometheus-API compatible service. In this mode Cortex can be used as a query accelerator with its caching and splitting features on other Prometheus query engines like Thanos Querier or your own Prometheus server. Query frontend needs to be configured with a downstream URL address (via the `-frontend.downstream-url` CLI flag), which is the endpoint of the Prometheus server intended to be connected with Cortex.
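The worker-pull model described above (the frontend enqueues, queriers pull, execute, and send results back) can be sketched with a toy channel-based queue. In the real system the queue is per-tenant and queriers connect over gRPC via `-querier.frontend-address`, so treat this as a minimal illustration only:

```go
package main

import "fmt"

// job is a queued query plus a channel for sending the result back to the
// frontend, which then forwards it to the client.
type job struct {
	tenant string
	query  string
	result chan string
}

func main() {
	queue := make(chan job, 100) // the frontend's in-memory FIFO queue

	// Queriers act as workers pulling jobs at their own pace, so one slow
	// query occupies one worker instead of blocking the frontend itself.
	for range 2 {
		go func() {
			for j := range queue {
				j.result <- "result of " + j.query // execute, then send back
			}
		}()
	}

	// The frontend enqueues an incoming query and waits for the result.
	j := job{tenant: "tenant-1", query: "up", result: make(chan string, 1)}
	queue <- j
	fmt.Println(<-j.result)
}
```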
#### Queueing -The query frontend queuing mechanism is used to: +The query frontend queueing mechanism is used to: * Ensure that large queries, that could cause an out-of-memory (OOM) error in the querier, will be retried on failure. This allows administrators to under-provision memory for queries, or optimistically run more small queries in parallel, which helps to reduce the total cost of ownership (TCO). * Prevent multiple large requests from being convoyed on a single querier by distributing them across all queriers using a first-in/first-out queue (FIFO). @@ -223,7 +223,7 @@ The query frontend supports caching query results and reuses them on subsequent ### Query Scheduler -Query Scheduler is an **optional** service that moves the internal queue from query frontend into separate component. +Query Scheduler is an **optional** service that moves the internal queue from query frontend into a separate component. This enables independent scaling of query frontends and number of queues (query scheduler). In order to use query scheduler, both query frontend and queriers must be configured with query scheduler address @@ -232,10 +232,10 @@ In order to use query scheduler, both query frontend and queriers must be config Flow of the query in the system changes when using query scheduler: 1) Query is received by query frontend, which can optionally split it or serve from the cache. -2) Query frontend forwards the query to random query scheduler process. -3) Query scheduler stores the query into in-memory queue, where it waits for some querier to pick it up. -3) Querier picks up the query, and executes it. -4) Querier sends result back to query-frontend, which then forwards it to the client. +2) Query frontend forwards the query to a random query scheduler process. +3) Query scheduler stores the query into an in-memory queue, where it waits for some querier to pick it up. +4) Querier picks up the query, and executes it. +5) Querier sends result back to query-frontend, which then forwards it to the client. Query schedulers are **stateless**. It is recommended to run two replicas to make sure queries can still be serviced while one replica is restarting. @@ -263,7 +263,7 @@ If all of the alertmanager nodes failed simultaneously there would be a loss of ### Configs API The **configs API** is an **optional service** managing the configuration of Rulers and Alertmanagers. -It provides APIs to get/set/update the ruler and alertmanager configurations and store them into backend. -Current supported backend are PostgreSQL and in-memory. +It provides APIs to get/set/update the ruler and alertmanager configurations and store them in the backend. +Current supported backends are PostgreSQL and in-memory. Configs API is **stateless**. diff --git a/docs/blocks-storage/compactor.md b/docs/blocks-storage/compactor.md index dc7daeb8a9..e914882395 100644 --- a/docs/blocks-storage/compactor.md +++ b/docs/blocks-storage/compactor.md @@ -195,8 +195,8 @@ compactor: sharding_ring: kvstore: - # Backend storage to use for the ring. Supported values are: consul, etcd, - # inmemory, memberlist, multi. + # Backend storage to use for the ring. Supported values are: consul, + # dynamodb, etcd, inmemory, memberlist, multi. # CLI flag: -compactor.ring.store [store: <string> | default = "consul"] @@ -204,6 +204,10 @@ compactor: # CLI flag: -compactor.ring.prefix [prefix: <string> | default = "collectors/"] + # The consul_config configures the consul client.
+ # The CLI flags prefix for this block config is: compactor.ring + [consul: <consul_config>] + dynamodb: # Region to access dynamodb. # CLI flag: -compactor.ring.dynamodb.region @@ -229,10 +233,6 @@ # CLI flag: -compactor.ring.dynamodb.timeout [timeout: <duration> | default = 2m] - # The consul_config configures the consul client. - # The CLI flags prefix for this block config is: compactor.ring - [consul: <consul_config>] - # The etcd_config configures the etcd client. # The CLI flags prefix for this block config is: compactor.ring [etcd: <etcd_config>] @@ -268,6 +268,12 @@ # CLI flag: -compactor.auto-forget-delay [auto_forget_delay: <duration> | default = 2m] + # Set to true to enable ring detailed metrics. These metrics provide + # detailed information, such as token count and ownership per tenant. + # Disabling them can significantly decrease the number of metrics emitted. + # CLI flag: -compactor.ring.detailed-metrics-enabled + [detailed_metrics_enabled: <boolean> | default = true] + # Minimum time to wait for ring stability at startup. 0 to disable. # CLI flag: -compactor.ring.wait-stability-min-duration [wait_stability_min_duration: <duration> | default = 1m] diff --git a/docs/blocks-storage/querier.md b/docs/blocks-storage/querier.md index 04d7430742..855ff5c902 100644 --- a/docs/blocks-storage/querier.md +++ b/docs/blocks-storage/querier.md @@ -127,7 +127,7 @@ querier: [per_step_stats_enabled: <boolean> | default = false] # Use compression for metrics query API or instant and range query APIs. - # Supports 'gzip' and '' (disable compression) + # Supported compression 'gzip', 'snappy', 'zstd' and '' (disable compression) # CLI flag: -querier.response-compression [response_compression: <string> | default = "gzip"] @@ -278,6 +278,30 @@ querier: # [Experimental] If true, experimental promQL functions are enabled. # CLI flag: -querier.enable-promql-experimental-functions [enable_promql_experimental_functions: <boolean> | default = false] + + # [Experimental] If true, querier will try to query the parquet files if + # available. + # CLI flag: -querier.enable-parquet-queryable + [enable_parquet_queryable: <boolean> | default = false] + + # [Experimental] Maximum size of the Parquet queryable shard cache. 0 to + # disable. + # CLI flag: -querier.parquet-queryable-shard-cache-size + [parquet_queryable_shard_cache_size: <int> | default = 512] + + # [Experimental] Parquet queryable's default block store to query. Valid + # options are tsdb and parquet. If it is set to tsdb, parquet queryable always + # fallback to store gateway. + # CLI flag: -querier.parquet-queryable-default-block-store + [parquet_queryable_default_block_store: <string> | default = "parquet"] + + # [Experimental] Disable Parquet queryable to fallback queries to Store + # Gateway if the block is not available as Parquet files but available in + # TSDB. Setting this to true will disable the fallback and users can remove + # Store Gateway. But need to make sure Parquet files are created before it is + # queryable. + # CLI flag: -querier.parquet-queryable-fallback-disabled + [parquet_queryable_fallback_disabled: <boolean> | default = false] ``` ### `blocks_storage_config` @@ -1394,6 +1418,255 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.metadata-cache.partitioned-groups-list-ttl [partitioned_groups_list_ttl: <duration> | default = 0s] + parquet_labels_cache: + # The parquet labels cache backend type. Single or Multiple cache backend + # can be provided. Supported values in single cache: memcached, redis, + # inmemory, and '' (disable).
Supported values in multi level cache: a + # comma-separated list of (inmemory, memcached, redis) + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.backend + [backend: <string> | default = ""] + + inmemory: + # Maximum size in bytes of in-memory parquet-labels cache used (shared + # between all tenants). + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.inmemory.max-size-bytes + [max_size_bytes: <int> | default = 1073741824] + + memcached: + # Comma separated list of memcached addresses. Supported prefixes are: + # dns+ (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV + # query, dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup + # made after that). + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.addresses + [addresses: <string> | default = ""] + + # The socket read/write timeout. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.timeout + [timeout: <duration> | default = 100ms] + + # The maximum number of idle connections that will be maintained per + # address. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-idle-connections + [max_idle_connections: <int> | default = 16] + + # The maximum number of concurrent asynchronous operations can occur. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-async-concurrency + [max_async_concurrency: <int> | default = 3] + + # The maximum number of enqueued asynchronous operations allowed. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-async-buffer-size + [max_async_buffer_size: <int> | default = 10000] + + # The maximum number of concurrent connections running get operations. + # If set to 0, concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-get-multi-concurrency + [max_get_multi_concurrency: <int> | default = 100] + + # The maximum number of keys a single underlying get operation should + # run. If more keys are specified, internally keys are split into + # multiple batches and fetched concurrently, honoring the max + # concurrency. If set to 0, the max batch size is unlimited. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-get-multi-batch-size + [max_get_multi_batch_size: <int> | default = 0] + + # The maximum size of an item stored in memcached. Bigger items are not + # stored. If set to 0, no maximum size is enforced. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-item-size + [max_item_size: <int> | default = 1048576] + + # Use memcached auto-discovery mechanism provided by some cloud provider + # like GCP and AWS + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.auto-discovery + [auto_discovery: <boolean> | default = false] + + set_async_circuit_breaker_config: + # If true, enable circuit breaker. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.enabled + [enabled: <boolean> | default = false] + + # Maximum number of requests allowed to pass through when the circuit + # breaker is half-open. If set to 0, by default it allows 1 request. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.half-open-max-requests + [half_open_max_requests: <int> | default = 10] + + # Period of the open state after which the state of the circuit + # breaker becomes half-open. If set to 0, by default open duration is + # 60 seconds.
+ # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.open-duration + [open_duration: <duration> | default = 5s] + + # Minimal requests to trigger the circuit breaker. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.min-requests + [min_requests: <int> | default = 50] + + # Consecutive failures to determine if the circuit breaker should + # open. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.consecutive-failures + [consecutive_failures: <int> | default = 5] + + # Failure percentage to determine if the circuit breaker should open. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.failure-percent + [failure_percent: <float> | default = 0.05] + + redis: + # Comma separated list of redis addresses. Supported prefixes are: dns+ + # (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query, + # dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after + # that). + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.addresses + [addresses: <string> | default = ""] + + # Redis username. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.username + [username: <string> | default = ""] + + # Redis password. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.password + [password: <string> | default = ""] + + # Database to be selected after connecting to the server. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.db + [db: <int> | default = 0] + + # Specifies the master's name. Must be not empty for Redis Sentinel. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.master-name + [master_name: <string> | default = ""] + + # The maximum number of concurrent GetMulti() operations. If set to 0, + # concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.max-get-multi-concurrency + [max_get_multi_concurrency: <int> | default = 100] + + # The maximum size per batch for mget. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.get-multi-batch-size + [get_multi_batch_size: <int> | default = 100] + + # The maximum number of concurrent SetMulti() operations. If set to 0, + # concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.max-set-multi-concurrency + [max_set_multi_concurrency: <int> | default = 100] + + # The maximum size per batch for pipeline set. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-multi-batch-size + [set_multi_batch_size: <int> | default = 100] + + # The maximum number of concurrent asynchronous operations can occur. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.max-async-concurrency + [max_async_concurrency: <int> | default = 3] + + # The maximum number of enqueued asynchronous operations allowed. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.max-async-buffer-size + [max_async_buffer_size: <int> | default = 10000] + + # Client dial timeout. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.dial-timeout + [dial_timeout: <duration> | default = 5s] + + # Client read timeout. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.read-timeout + [read_timeout: <duration> | default = 3s] + + # Client write timeout. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.write-timeout + [write_timeout: <duration> | default = 3s] + + # Whether to enable tls for redis connection.
+ # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-enabled + [tls_enabled: <boolean> | default = false] + + # Path to the client certificate file, which will be used for + # authenticating with the server. Also requires the key path to be + # configured. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-cert-path + [tls_cert_path: <string> | default = ""] + + # Path to the key file for the client certificate. Also requires the + # client certificate to be configured. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-key-path + [tls_key_path: <string> | default = ""] + + # Path to the CA certificates file to validate server certificate + # against. If not set, the host's root CA certificates are used. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-ca-path + [tls_ca_path: <string> | default = ""] + + # Override the expected name on the server certificate. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-server-name + [tls_server_name: <string> | default = ""] + + # Skip validating server certificate. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-insecure-skip-verify + [tls_insecure_skip_verify: <boolean> | default = false] + + # If not zero then client-side caching is enabled. Client-side caching + # is when data is stored in memory instead of fetching data each time. + # See https://redis.io/docs/manual/client-side-caching/ for more info. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.cache-size + [cache_size: <int> | default = 0] + + set_async_circuit_breaker_config: + # If true, enable circuit breaker. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.enabled + [enabled: <boolean> | default = false] + + # Maximum number of requests allowed to pass through when the circuit + # breaker is half-open. If set to 0, by default it allows 1 request. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.half-open-max-requests + [half_open_max_requests: <int> | default = 10] + + # Period of the open state after which the state of the circuit + # breaker becomes half-open. If set to 0, by default open duration is + # 60 seconds. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.open-duration + [open_duration: <duration> | default = 5s] + + # Minimal requests to trigger the circuit breaker. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.min-requests + [min_requests: <int> | default = 50] + + # Consecutive failures to determine if the circuit breaker should + # open. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.consecutive-failures + [consecutive_failures: <int> | default = 5] + + # Failure percentage to determine if the circuit breaker should open. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.failure-percent + [failure_percent: <float> | default = 0.05] + + multilevel: + # The maximum number of concurrent asynchronous operations can occur + # when backfilling cache items. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.multilevel.max-async-concurrency + [max_async_concurrency: <int> | default = 3] + + # The maximum number of enqueued asynchronous operations allowed when + # backfilling cache items.
+ # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.multilevel.max-async-buffer-size + [max_async_buffer_size: | default = 10000] + + # The maximum number of items to backfill per asynchronous operation. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.multilevel.max-backfill-items + [max_backfill_items: | default = 10000] + + # Size of each subrange that bucket object is split into for better + # caching. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.subrange-size + [subrange_size: | default = 16000] + + # Maximum number of sub-GetRange requests that a single GetRange request + # can be split into when fetching parquet labels file. Zero or negative + # value = unlimited number of sub-requests. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.max-get-range-requests + [max_get_range_requests: | default = 3] + + # TTL for caching object attributes for parquet labels file. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.attributes-ttl + [attributes_ttl: | default = 168h] + + # TTL for caching individual subranges. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.subrange-ttl + [subrange_ttl: | default = 24h] + # Maximum number of entries in the regex matchers cache. 0 to disable. # CLI flag: -blocks-storage.bucket-store.matchers-cache-max-items [matchers_cache_max_items: | default = 0] diff --git a/docs/blocks-storage/store-gateway.md b/docs/blocks-storage/store-gateway.md index ee2d307d3d..506cf0f32a 100644 --- a/docs/blocks-storage/store-gateway.md +++ b/docs/blocks-storage/store-gateway.md @@ -210,8 +210,8 @@ store_gateway: # This option needs be set both on the store-gateway and querier when # running in microservices mode. kvstore: - # Backend storage to use for the ring. Supported values are: consul, etcd, - # inmemory, memberlist, multi. + # Backend storage to use for the ring. Supported values are: consul, + # dynamodb, etcd, inmemory, memberlist, multi. # CLI flag: -store-gateway.sharding-ring.store [store: | default = "consul"] @@ -219,6 +219,11 @@ store_gateway: # CLI flag: -store-gateway.sharding-ring.prefix [prefix: | default = "collectors/"] + # The consul_config configures the consul client. + # The CLI flags prefix for this block config is: + # store-gateway.sharding-ring + [consul: ] + dynamodb: # Region to access dynamodb. # CLI flag: -store-gateway.sharding-ring.dynamodb.region @@ -244,11 +249,6 @@ store_gateway: # CLI flag: -store-gateway.sharding-ring.dynamodb.timeout [timeout: | default = 2m] - # The consul_config configures the consul client. - # The CLI flags prefix for this block config is: - # store-gateway.sharding-ring - [consul: ] - # The etcd_config configures the etcd client. # The CLI flags prefix for this block config is: # store-gateway.sharding-ring @@ -303,6 +303,12 @@ store_gateway: # CLI flag: -store-gateway.sharding-ring.keep-instance-in-the-ring-on-shutdown [keep_instance_in_the_ring_on_shutdown: | default = false] + # Set to true to enable ring detailed metrics. These metrics provide + # detailed information, such as token count and ownership per tenant. + # Disabling them can significantly decrease the number of metrics emitted. + # CLI flag: -store-gateway.sharding-ring.detailed-metrics-enabled + [detailed_metrics_enabled: | default = true] + # Minimum time to wait for ring stability at startup. 0 to disable. 
# CLI flag: -store-gateway.sharding-ring.wait-stability-min-duration [wait_stability_min_duration: | default = 1m] @@ -351,12 +357,6 @@ store_gateway: query_protection: rejection: - # EXPERIMENTAL: Enable query rejection feature, where the component return - # 503 to all incoming query requests when the configured thresholds are - # breached. - # CLI flag: -store-gateway.query-protection.rejection.enabled - [enabled: | default = false] - threshold: # EXPERIMENTAL: Max CPU utilization that this ingester can reach before # rejecting new query request (across all tenants) in percentage, @@ -1504,6 +1504,255 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.metadata-cache.partitioned-groups-list-ttl [partitioned_groups_list_ttl: | default = 0s] + parquet_labels_cache: + # The parquet labels cache backend type. Single or Multiple cache backend + # can be provided. Supported values in single cache: memcached, redis, + # inmemory, and '' (disable). Supported values in multi level cache: a + # comma-separated list of (inmemory, memcached, redis) + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.backend + [backend: | default = ""] + + inmemory: + # Maximum size in bytes of in-memory parquet-labels cache used (shared + # between all tenants). + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.inmemory.max-size-bytes + [max_size_bytes: | default = 1073741824] + + memcached: + # Comma separated list of memcached addresses. Supported prefixes are: + # dns+ (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV + # query, dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup + # made after that). + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.addresses + [addresses: | default = ""] + + # The socket read/write timeout. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.timeout + [timeout: | default = 100ms] + + # The maximum number of idle connections that will be maintained per + # address. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-idle-connections + [max_idle_connections: | default = 16] + + # The maximum number of concurrent asynchronous operations can occur. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-async-concurrency + [max_async_concurrency: | default = 3] + + # The maximum number of enqueued asynchronous operations allowed. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-async-buffer-size + [max_async_buffer_size: | default = 10000] + + # The maximum number of concurrent connections running get operations. + # If set to 0, concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-get-multi-concurrency + [max_get_multi_concurrency: | default = 100] + + # The maximum number of keys a single underlying get operation should + # run. If more keys are specified, internally keys are split into + # multiple batches and fetched concurrently, honoring the max + # concurrency. If set to 0, the max batch size is unlimited. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-get-multi-batch-size + [max_get_multi_batch_size: | default = 0] + + # The maximum size of an item stored in memcached. Bigger items are not + # stored. If set to 0, no maximum size is enforced. 
+ # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-item-size + [max_item_size: | default = 1048576] + + # Use memcached auto-discovery mechanism provided by some cloud provider + # like GCP and AWS + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.auto-discovery + [auto_discovery: | default = false] + + set_async_circuit_breaker_config: + # If true, enable circuit breaker. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.enabled + [enabled: | default = false] + + # Maximum number of requests allowed to pass through when the circuit + # breaker is half-open. If set to 0, by default it allows 1 request. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.half-open-max-requests + [half_open_max_requests: | default = 10] + + # Period of the open state after which the state of the circuit + # breaker becomes half-open. If set to 0, by default open duration is + # 60 seconds. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.open-duration + [open_duration: | default = 5s] + + # Minimal requests to trigger the circuit breaker. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.min-requests + [min_requests: | default = 50] + + # Consecutive failures to determine if the circuit breaker should + # open. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.consecutive-failures + [consecutive_failures: | default = 5] + + # Failure percentage to determine if the circuit breaker should open. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.failure-percent + [failure_percent: | default = 0.05] + + redis: + # Comma separated list of redis addresses. Supported prefixes are: dns+ + # (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query, + # dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after + # that). + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.addresses + [addresses: | default = ""] + + # Redis username. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.username + [username: | default = ""] + + # Redis password. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.password + [password: | default = ""] + + # Database to be selected after connecting to the server. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.db + [db: | default = 0] + + # Specifies the master's name. Must be not empty for Redis Sentinel. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.master-name + [master_name: | default = ""] + + # The maximum number of concurrent GetMulti() operations. If set to 0, + # concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.max-get-multi-concurrency + [max_get_multi_concurrency: | default = 100] + + # The maximum size per batch for mget. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.get-multi-batch-size + [get_multi_batch_size: | default = 100] + + # The maximum number of concurrent SetMulti() operations. If set to 0, + # concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.max-set-multi-concurrency + [max_set_multi_concurrency: | default = 100] + + # The maximum size per batch for pipeline set. 
+ # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-multi-batch-size + [set_multi_batch_size: | default = 100] + + # The maximum number of concurrent asynchronous operations can occur. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.max-async-concurrency + [max_async_concurrency: | default = 3] + + # The maximum number of enqueued asynchronous operations allowed. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.max-async-buffer-size + [max_async_buffer_size: | default = 10000] + + # Client dial timeout. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.dial-timeout + [dial_timeout: | default = 5s] + + # Client read timeout. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.read-timeout + [read_timeout: | default = 3s] + + # Client write timeout. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.write-timeout + [write_timeout: | default = 3s] + + # Whether to enable tls for redis connection. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-enabled + [tls_enabled: | default = false] + + # Path to the client certificate file, which will be used for + # authenticating with the server. Also requires the key path to be + # configured. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-cert-path + [tls_cert_path: | default = ""] + + # Path to the key file for the client certificate. Also requires the + # client certificate to be configured. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-key-path + [tls_key_path: | default = ""] + + # Path to the CA certificates file to validate server certificate + # against. If not set, the host's root CA certificates are used. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-ca-path + [tls_ca_path: | default = ""] + + # Override the expected name on the server certificate. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-server-name + [tls_server_name: | default = ""] + + # Skip validating server certificate. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-insecure-skip-verify + [tls_insecure_skip_verify: | default = false] + + # If not zero then client-side caching is enabled. Client-side caching + # is when data is stored in memory instead of fetching data each time. + # See https://redis.io/docs/manual/client-side-caching/ for more info. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.cache-size + [cache_size: | default = 0] + + set_async_circuit_breaker_config: + # If true, enable circuit breaker. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.enabled + [enabled: | default = false] + + # Maximum number of requests allowed to pass through when the circuit + # breaker is half-open. If set to 0, by default it allows 1 request. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.half-open-max-requests + [half_open_max_requests: | default = 10] + + # Period of the open state after which the state of the circuit + # breaker becomes half-open. If set to 0, by default open duration is + # 60 seconds. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.open-duration + [open_duration: | default = 5s] + + # Minimal requests to trigger the circuit breaker. 
+ # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.min-requests + [min_requests: | default = 50] + + # Consecutive failures to determine if the circuit breaker should + # open. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.consecutive-failures + [consecutive_failures: | default = 5] + + # Failure percentage to determine if the circuit breaker should open. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.failure-percent + [failure_percent: | default = 0.05] + + multilevel: + # The maximum number of concurrent asynchronous operations can occur + # when backfilling cache items. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.multilevel.max-async-concurrency + [max_async_concurrency: | default = 3] + + # The maximum number of enqueued asynchronous operations allowed when + # backfilling cache items. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.multilevel.max-async-buffer-size + [max_async_buffer_size: | default = 10000] + + # The maximum number of items to backfill per asynchronous operation. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.multilevel.max-backfill-items + [max_backfill_items: | default = 10000] + + # Size of each subrange that bucket object is split into for better + # caching. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.subrange-size + [subrange_size: | default = 16000] + + # Maximum number of sub-GetRange requests that a single GetRange request + # can be split into when fetching parquet labels file. Zero or negative + # value = unlimited number of sub-requests. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.max-get-range-requests + [max_get_range_requests: | default = 3] + + # TTL for caching object attributes for parquet labels file. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.attributes-ttl + [attributes_ttl: | default = 168h] + + # TTL for caching individual subranges. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.subrange-ttl + [subrange_ttl: | default = 24h] + # Maximum number of entries in the regex matchers cache. 0 to disable. # CLI flag: -blocks-storage.bucket-store.matchers-cache-max-items [matchers_cache_max_items: | default = 0] diff --git a/docs/configuration/arguments.md b/docs/configuration/arguments.md index 943d319aee..a99fe4dace 100644 --- a/docs/configuration/arguments.md +++ b/docs/configuration/arguments.md @@ -73,7 +73,7 @@ The next three options only apply when the querier is used together with the Que - `-frontend.forward-headers-list` - Request headers forwarded by query frontend to downstream queriers. Multiple headers may be specified. Defaults to empty. + Request headers forwarded by query frontend to downstream queriers. Multiple headers may be specified. Defaults to empty. - `-frontend.max-cache-freshness` @@ -113,7 +113,7 @@ The next three options only apply when the querier is used together with the Que Enable the distributors HA tracker so that it can accept samples from Prometheus HA replicas gracefully (requires labels). Global (for distributors), this ensures that the necessary internal data structures for the HA handling are created. The option `enable-for-all-users` is still needed to enable ingestion of HA samples for all users. 
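+  For example, HA tracking for every tenant could be enabled with flags like the following (a sketch: the flag spellings follow the `distributor.ha-tracker.` prefix convention described in this section and should be verified against the config file reference, and the etcd endpoint is a placeholder):
+
+  ```
+  -distributor.ha-tracker.enable=true
+  -distributor.ha-tracker.enable-for-all-users=true
+  -distributor.ha-tracker.store=etcd
+  -distributor.ha-tracker.etcd.endpoints=etcd.svc:2379
+  ```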
- `distributor.drop-label` - This flag can be used to specify label names that to drop during sample ingestion within the distributor and can be repeated in order to drop multiple labels. + This flag can be used to specify label names to drop during sample ingestion within the distributor and can be repeated in order to drop multiple labels. ### Ring/HA Tracker Store @@ -123,7 +123,7 @@ The KVStore client is used by both the Ring and HA Tracker (HA Tracker doesn't s - `{ring,distributor.ha-tracker}.store` Backend storage to use for the HA Tracker (consul, etcd, inmemory, multi). - **Warning:** The `inmemory` store will not work correctly with multiple distributors as each distributor can have a different state, causing injestion errors. + **Warning:** The `inmemory` store will not work correctly with multiple distributors as each distributor can have a different state, causing ingestion errors. - `{ring,distributor.ring}.store` Backend storage to use for the Ring (consul, etcd, inmemory, memberlist, multi). @@ -162,8 +162,8 @@ prefix these flags with `distributor.ha-tracker.` The trusted CA file path. - `etcd.tls-insecure-skip-verify` Skip validating server certificate. -- `etcd.ping-without-stream-allowd'` - Enable/Disable PermitWithoutStream parameter +- `etcd.ping-without-stream-allowed` + Enable/Disable PermitWithoutStream parameter #### memberlist @@ -178,7 +178,7 @@ All nodes run the following two loops: 1. Every "gossip interval", pick random "gossip nodes" number of nodes, and send recent ring updates to them. 2. Every "push/pull sync interval", choose random single node, and exchange full ring information with it (push/pull sync). After this operation, rings on both nodes are the same. -When a node receives a ring update, node will merge it into its own ring state, and if that resulted in a change, node will add that update to the list of gossiped updates. +When a node receives a ring update, the node will merge it into its own ring state, and if that resulted in a change, the node will add that update to the list of gossiped updates. Such update will be gossiped `R * log(N+1)` times by this node (R = retransmit multiplication factor, N = number of gossiping nodes in the cluster). If you find the propagation to be too slow, there are some tuning possibilities (default values are memberlist settings for LAN networks): @@ -187,14 +187,14 @@ If you find the propagation to be too slow, there are some tuning possibilities - Decrease push/pull sync interval (default 30s) - Increase retransmit multiplication factor (default 4) -To find propagation delay, you can use `cortex_ring_oldest_member_timestamp{state="ACTIVE"}` metric. +To find propagation delay, you can use the `cortex_ring_oldest_member_timestamp{state="ACTIVE"}` metric. Flags for configuring KV store based on memberlist library: - `memberlist.nodename` Name of the node in memberlist cluster. Defaults to hostname. - `memberlist.randomize-node-name` - This flag adds extra random suffix to the node name used by memberlist. Defaults to true. Using random suffix helps to prevent issues when running multiple memberlist nodes on the same machine, or when node names are reused (eg. in stateful sets). + This flag adds an extra random suffix to the node name used by memberlist. Defaults to true. Using a random suffix helps to prevent issues when running multiple memberlist nodes on the same machine, or when node names are reused (e.g. in stateful sets). 
- `memberlist.retransmit-factor`
  Multiplication factor used when sending out messages (factor * log(N+1)). If not set, default value is used.
- `memberlist.join`
@@ -228,29 +228,29 @@ Flags for configuring KV store based on memberlist library:
- `memberlist.gossip-to-dead-nodes-time`
  How long to keep gossiping to the nodes that seem to be dead. After this time, dead node is removed from list of nodes. If "dead" node appears again, it will simply join the cluster again, if its name is not reused by other node in the meantime. If the name has been reused, such a reanimated node will be ignored by other members.
- `memberlist.dead-node-reclaim-time`
-  How soon can dead's node name be reused by a new node (using different IP). Disabled by default, name reclaim is not allowed until `gossip-to-dead-nodes-time` expires. This can be useful to set to low numbers when reusing node names, eg. in stateful sets.
-  If memberlist library detects that new node is trying to reuse the name of previous node, it will log message like this: `Conflicting address for ingester-6. Mine: 10.44.12.251:7946 Theirs: 10.44.12.54:7946 Old state: 2`. Node states are: "alive" = 0, "suspect" = 1 (doesn't respond, will be marked as dead if it doesn't respond), "dead" = 2.
+  How soon can a dead node's name be reused by a new node (using a different IP). Disabled by default, name reclaim is not allowed until `gossip-to-dead-nodes-time` expires. This can be useful to set to low numbers when reusing node names, e.g. in stateful sets.
+  If the memberlist library detects that a new node is trying to reuse the name of a previous node, it will log a message like this: `Conflicting address for ingester-6. Mine: 10.44.12.251:7946 Theirs: 10.44.12.54:7946 Old state: 2`. Node states are: "alive" = 0, "suspect" = 1 (doesn't respond, will be marked as dead if it doesn't respond), "dead" = 2.

#### Multi KV

-This is a special key-value implementation that uses two different KV stores (eg. consul, etcd or memberlist). One of them is always marked as primary, and all reads and writes go to primary store. Other one, secondary, is only used for writes. The idea is that operator can use multi KV store to migrate from primary to secondary store in runtime.
+This is a special key-value implementation that uses two different KV stores (e.g. consul, etcd or memberlist). One of them is always marked as primary, and all reads and writes go to the primary store. The other one, secondary, is only used for writes. The idea is that an operator can use the multi KV store to migrate from primary to secondary store at runtime.

For example, migration from Consul to Etcd would look like this:

- Set `ring.store` to use `multi` store. Set `-multi.primary=consul` and `-multi.secondary=etcd`. All consul and etcd settings must still be specified.
-- Start all Cortex microservices. They will still use Consul as primary KV, but they will also write share ring via etcd.
-- Operator can now use "runtime config" mechanism to switch primary store to etcd.
-- After all Cortex microservices have picked up new primary store, and everything looks correct, operator can now shut down Consul, and modify Cortex configuration to use `-ring.store=etcd` only.
+- Start all Cortex microservices. They will still use Consul as primary KV, but they will also share the ring via etcd.
+- The operator can now use the "runtime config" mechanism to switch the primary store to etcd.
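+  For example, a runtime configuration update like the following sketch switches the primary store (the `multi_kv_config` section is described below; verify the field name against your Cortex version):
+
+  ```yaml
+  multi_kv_config:
+    primary: etcd
+  ```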
+- After all Cortex microservices have picked up the new primary store, and everything looks correct, the operator can now shut down Consul, and modify the Cortex configuration to use `-ring.store=etcd` only.
- At this point, Consul can be shut down.

-Multi KV has following parameters:
+Multi KV has the following parameters:

- `multi.primary` - name of primary KV store. Same values as in `ring.store` are supported, except `multi`.
- `multi.secondary` - name of secondary KV store.
- `multi.mirror-enabled` - enable mirroring of values to secondary store, defaults to true
-- `multi.mirror-timeout` - wait max this time to write to secondary store to finish. Default to 2 seconds. Errors writing to secondary store are not reported to caller, but are logged and also reported via `cortex_multikv_mirror_write_errors_total` metric.
+- `multi.mirror-timeout` - maximum time to wait for a write to the secondary store to finish. Defaults to 2 seconds. Errors writing to the secondary store are not reported to the caller, but are logged and also reported via the `cortex_multikv_mirror_write_errors_total` metric.

-Multi KV also reacts on changes done via runtime configuration. It uses this section:
+Multi KV also reacts to changes done via runtime configuration. It uses this section:

```yaml
multi_kv_config:
@@ -268,7 +268,7 @@ HA tracking has two of its own flags:
- `distributor.ha-tracker.replica`
  Prometheus label to look for in samples to identify a Prometheus HA replica. (default "`__replica__`")

-It's reasonable to assume people probably already have a `cluster` label, or something similar. If not, they should add one along with `__replica__` via external labels in their Prometheus config. If you stick to these default values your Prometheus config could look like this (`POD_NAME` is an environment variable which must be set by you):
+It's reasonable to assume people already have a `cluster` label, or something similar. If not, they should add one along with `__replica__` via external labels in their Prometheus config. If you stick to these default values, your Prometheus config could look like this (`POD_NAME` is an environment variable which must be set by you):

```yaml
global:
@@ -277,9 +277,9 @@ global:
  __replica__: $POD_NAME
```

-HA Tracking looks for the two labels (which can be overwritten per user)
+HA Tracking looks for the two labels (which can be overridden per user).

-It also talks to a KVStore and has it's own copies of the same flags used by the Distributor to connect to for the ring.
+It also talks to a KVStore and has its own copies of the same flags used by the Distributor to connect to the ring.

- `distributor.ha-tracker.failover-timeout`
  If we don't receive any samples from the accepted replica for a cluster in this amount of time we will failover to the next replica we receive a sample from. This value must be greater than the update timeout (default 30s)
- `distributor.ha-tracker.store`
@@ -307,9 +307,9 @@ It also talks to a KVStore and has it's own copies of the same flags used by the

## Runtime Configuration file

-Cortex has a concept of "runtime config" file, which is simply a file that is reloaded while Cortex is running. It is used by some Cortex components to allow operator to change some aspects of Cortex configuration without restarting it. File is specified by using `-runtime-config.file=` flag and reload period (which defaults to 10 seconds) can be changed by `-runtime-config.reload-period=` flag.
Previously this mechanism was only used by limits overrides, and flags were called `-limits.per-user-override-config=` and `-limits.per-user-override-period=10s` respectively. These are still used, if `-runtime-config.file=` is not specified.
+Cortex has a concept of a "runtime config" file, which is simply a file that is reloaded while Cortex is running. It is used by some Cortex components to allow an operator to change some aspects of the Cortex configuration without restarting it. The file is specified by using the `-runtime-config.file=` flag, and the reload period (which defaults to 10 seconds) can be changed by the `-runtime-config.reload-period=` flag. Previously this mechanism was only used by limits overrides, and flags were called `-limits.per-user-override-config=` and `-limits.per-user-override-period=10s` respectively. These are still used, if `-runtime-config.file=` is not specified.

-At the moment runtime configuration may contain per-user limits, multi KV store, and ingester instance limits.
+At the moment, runtime configuration may contain per-user limits, multi KV store, and ingester instance limits.

Example runtime configuration file:

@@ -333,15 +333,15 @@ ingester_limits:
  max_inflight_push_requests: 10000
```

-When running Cortex on Kubernetes, store this file in a config map and mount it in each services' containers. When changing the values there is no need to restart the services, unless otherwise specified.
+When running Cortex on Kubernetes, store this file in a config map and mount it in each service's container. When changing the values there is no need to restart the services, unless otherwise specified.

The `/runtime_config` endpoint returns the whole runtime configuration, including the overrides. In case you want to get only the non-default values of the configuration you can pass the `mode` parameter with the `diff` value.

-## Ingester, Distributor & Querier limits.
+## Ingester, Distributor & Querier limits

-Cortex implements various limits on the requests it can process, in order to prevent a single tenant overwhelming the cluster. There are various default global limits which apply to all tenants which can be set on the command line. These limits can also be overridden on a per-tenant basis by using `overrides` field of runtime configuration file.
+Cortex implements various limits on the requests it can process, in order to prevent a single tenant from overwhelming the cluster. There are default global limits which apply to all tenants and can be set on the command line. These limits can also be overridden on a per-tenant basis by using the `overrides` field of the runtime configuration file.

-The `overrides` field is a map of tenant ID (same values as passed in the `X-Scope-OrgID` header) to the various limits. An example could look like:
+The `overrides` field is a map of tenant ID (same values as passed in the `X-Scope-OrgID` header) to the various limits. An example could look like:

```yaml
overrides:
@@ -363,9 +363,9 @@ Valid per-tenant limits are (with their corresponding flags for default values):

  The per-tenant rate limit (and burst size), in samples per second. It supports two strategies: `local` (default) and `global`.

-  The `local` strategy enforces the limit on a per distributor basis, actual effective rate limit will be N times higher, where N is the number of distributor replicas.
+  The `local` strategy enforces the limit on a per distributor basis; the actual effective rate limit will be N times higher, where N is the number of distributor replicas.

-  The `global` strategy enforces the limit globally, configuring a per-distributor local rate limiter as `ingestion_rate / N`, where N is the number of distributor replicas (it's automatically adjusted if the number of replicas change). The `ingestion_burst_size` refers to the per-distributor local rate limiter (even in the case of the `global` strategy) and should be set at least to the maximum number of samples expected in a single push request. For this reason, the `global` strategy requires that push requests are evenly distributed across the pool of distributors; if you use a load balancer in front of the distributors you should be already covered, while if you have a custom setup (ie. an authentication gateway in front) make sure traffic is evenly balanced across distributors.
+  The `global` strategy enforces the limit globally, configuring a per-distributor local rate limiter as `ingestion_rate / N`, where N is the number of distributor replicas (it's automatically adjusted if the number of replicas changes). The `ingestion_burst_size` refers to the per-distributor local rate limiter (even in the case of the `global` strategy) and should be set at least to the maximum number of samples expected in a single push request. For this reason, the `global` strategy requires that push requests are evenly distributed across the pool of distributors; if you use a load balancer in front of the distributors you should already be covered, while if you have a custom setup (i.e. an authentication gateway in front) make sure traffic is evenly balanced across distributors.

  The `global` strategy requires the distributors to form their own ring, which is used to keep track of the current number of healthy distributor replicas. The ring is configured by `distributor: { ring: {}}` / `-distributor.ring.*`.

@@ -373,37 +373,37 @@ Valid per-tenant limits are (with their corresponding flags for default values):
- `max_label_value_length` / `-validation.max-length-label-value`
- `max_label_names_per_series` / `-validation.max-label-names-per-series`

-  Also enforced by the distributor, limits on the on length of labels and their values, and the total number of labels allowed per series.
+  Also enforced by the distributor; limits on the length of labels and their values, and the total number of labels allowed per series.

- `reject_old_samples` / `-validation.reject-old-samples`
- `reject_old_samples_max_age` / `-validation.reject-old-samples.max-age`
- `creation_grace_period` / `-validation.create-grace-period`

-  Also enforce by the distributor, limits on how far in the past (and future) timestamps that we accept can be.
+  Also enforced by the distributor; limits on how far in the past (and future) the timestamps that we accept can be.

- `max_series_per_user` / `-ingester.max-series-per-user`
- `max_series_per_metric` / `-ingester.max-series-per-metric`

-  Enforced by the ingesters; limits the number of active series a user (or a given metric) can have. When running with `-distributor.shard-by-all-labels=false` (the default), this limit will enforce the maximum number of series a metric can have 'globally', as all series for a single metric will be sent to the same replication set of ingesters.
This is not the case when running with `-distributor.shard-by-all-labels=true`, so the actual limit will be N/RF times higher, where N is number of ingester replicas and RF is configured replication factor. + Enforced by the ingesters; limits the number of active series a user (or a given metric) can have. When running with `-distributor.shard-by-all-labels=false` (the default), this limit will enforce the maximum number of series a metric can have 'globally', as all series for a single metric will be sent to the same replication set of ingesters. This is not the case when running with `-distributor.shard-by-all-labels=true`, so the actual limit will be N/RF times higher, where N is the number of ingester replicas and RF is the configured replication factor. - `max_global_series_per_user` / `-ingester.max-global-series-per-user` - `max_global_series_per_metric` / `-ingester.max-global-series-per-metric` - Like `max_series_per_user` and `max_series_per_metric`, but the limit is enforced across the cluster. Each ingester is configured with a local limit based on the replication factor, the `-distributor.shard-by-all-labels` setting and the current number of healthy ingesters, and is kept updated whenever the number of ingesters change. + Like `max_series_per_user` and `max_series_per_metric`, but the limit is enforced across the cluster. Each ingester is configured with a local limit based on the replication factor, the `-distributor.shard-by-all-labels` setting and the current number of healthy ingesters, and is kept updated whenever the number of ingesters changes. Requires `-distributor.replication-factor`, `-distributor.shard-by-all-labels`, `-distributor.sharding-strategy` and `-distributor.zone-awareness-enabled` set for the ingesters too. - `max_metadata_per_user` / `-ingester.max-metadata-per-user` - `max_metadata_per_metric` / `-ingester.max-metadata-per-metric` - Enforced by the ingesters; limits the number of active metadata a user (or a given metric) can have. When running with `-distributor.shard-by-all-labels=false` (the default), this limit will enforce the maximum number of metadata a metric can have 'globally', as all metadata for a single metric will be sent to the same replication set of ingesters. This is not the case when running with `-distributor.shard-by-all-labels=true`, so the actual limit will be N/RF times higher, where N is number of ingester replicas and RF is configured replication factor. + Enforced by the ingesters; limits the number of active metadata a user (or a given metric) can have. When running with `-distributor.shard-by-all-labels=false` (the default), this limit will enforce the maximum number of metadata a metric can have 'globally', as all metadata for a single metric will be sent to the same replication set of ingesters. This is not the case when running with `-distributor.shard-by-all-labels=true`, so the actual limit will be N/RF times higher, where N is the number of ingester replicas and RF is the configured replication factor. - `max_fetched_series_per_query` / `querier.max-fetched-series-per-query` - When running Cortex with blocks storage this limit is enforced in the queriers on unique series fetched from ingesters and store-gateways (long-term storage). + When running Cortex with blocks storage, this limit is enforced in the queriers on unique series fetched from ingesters and store-gateways (long-term storage). 
- `max_global_metadata_per_user` / `-ingester.max-global-metadata-per-user` - `max_global_metadata_per_metric` / `-ingester.max-global-metadata-per-metric` - Like `max_metadata_per_user` and `max_metadata_per_metric`, but the limit is enforced across the cluster. Each ingester is configured with a local limit based on the replication factor, the `-distributor.shard-by-all-labels` setting and the current number of healthy ingesters, and is kept updated whenever the number of ingesters change. + Like `max_metadata_per_user` and `max_metadata_per_metric`, but the limit is enforced across the cluster. Each ingester is configured with a local limit based on the replication factor, the `-distributor.shard-by-all-labels` setting and the current number of healthy ingesters, and is kept updated whenever the number of ingesters changes. Requires `-distributor.replication-factor`, `-distributor.shard-by-all-labels`, `-distributor.sharding-strategy` and `-distributor.zone-awareness-enabled` set for the ingesters too. @@ -423,25 +423,25 @@ ingester_limits: Valid ingester instance limits are (with their corresponding flags): -- `max_ingestion_rate` \ `--ingester.instance-limits.max-ingestion-rate` +- `max_ingestion_rate` / `--ingester.instance-limits.max-ingestion-rate` Limit the ingestion rate in samples per second for an ingester. When this limit is reached, new requests will fail with an HTTP 500 error. -- `max_series` \ `-ingester.instance-limits.max-series` +- `max_series` / `-ingester.instance-limits.max-series` Limit the total number of series that an ingester keeps in memory, across all users. When this limit is reached, requests that create new series will fail with an HTTP 500 error. -- `max_tenants` \ `-ingester.instance-limits.max-tenants` +- `max_tenants` / `-ingester.instance-limits.max-tenants` Limit the maximum number of users an ingester will accept metrics for. When this limit is reached, requests from new users will fail with an HTTP 500 error. -- `max_inflight_push_requests` \ `-ingester.instance-limits.max-inflight-push-requests` +- `max_inflight_push_requests` / `-ingester.instance-limits.max-inflight-push-requests` Limit the maximum number of requests being handled by an ingester at once. This setting is critical for preventing ingesters from using an excessive amount of memory during high load or temporary slow downs. When this limit is reached, new requests will fail with an HTTP 500 error. ## DNS Service Discovery -Some clients in Cortex support service discovery via DNS to find addresses of backend servers to connect to (ie. caching servers). The clients supporting it are: +Some clients in Cortex support service discovery via DNS to find addresses of backend servers to connect to (i.e. caching servers). The clients supporting it are: - [Blocks storage's memcached cache](../blocks-storage/store-gateway.md#caching) - [All caching memcached servers](./config-file-reference.md#memcached-client-config) @@ -449,7 +449,7 @@ Some clients in Cortex support service discovery via DNS to find addresses of ba ### Supported discovery modes -The DNS service discovery, inspired from Thanos DNS SD, supports different discovery modes. A discovery mode is selected adding a specific prefix to the address. The supported prefixes are: +The DNS service discovery, inspired by Thanos DNS SD, supports different discovery modes. A discovery mode is selected by adding a specific prefix to the address. The supported prefixes are: - **`dns+`**
The domain name after the prefix is looked up as an A/AAAA query. For example: `dns+memcached.local:11211` @@ -458,13 +458,13 @@ The DNS service discovery, inspired from Thanos DNS SD, supports different disco - **`dnssrvnoa+`**
The domain name after the prefix is looked up as a SRV query, with no A/AAAA lookup made after that. For example: `dnssrvnoa+_memcached._tcp.memcached.namespace.svc.cluster.local` -If **no prefix** is provided, the provided IP or hostname will be used straightaway without pre-resolving it. +If **no prefix** is provided, the provided IP or hostname will be used directly without pre-resolving it. If you are using a managed memcached service from [Google Cloud](https://cloud.google.com/memorystore/docs/memcached/auto-discovery-overview), or [AWS](https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/AutoDiscovery.HowAutoDiscoveryWorks.html), use the [auto-discovery](./config-file-reference.md#memcached-client-config) flag instead of DNS discovery, then use the discovery/configuration endpoint as the domain name without any prefix. ## Logging of IP of reverse proxy -If a reverse proxy is used in front of Cortex it might be difficult to troubleshoot errors. The following 3 settings can be used to log the IP address passed along by the reverse proxy in headers like X-Forwarded-For. +If a reverse proxy is used in front of Cortex, it might be difficult to troubleshoot errors. The following 3 settings can be used to log the IP address passed along by the reverse proxy in headers like X-Forwarded-For. - `-server.log_source_ips_enabled` @@ -472,8 +472,8 @@ If a reverse proxy is used in front of Cortex it might be difficult to troublesh - `-server.log-source-ips-header` - Header field storing the source IPs. It is only used if `-server.log-source-ips-enabled` is true and if `-server.log-source-ips-regex` is set. If not set the default Forwarded, X-Real-IP or X-Forwarded-For headers are searched. + Header field storing the source IPs. It is only used if `-server.log-source-ips-enabled` is true and if `-server.log-source-ips-regex` is set. If not set, the default Forwarded, X-Real-IP or X-Forwarded-For headers are searched. - `-server.log-source-ips-regex` - Regular expression for matching the source IPs. It should contain at least one capturing group the first of which will be returned. Only used if `-server.log-source-ips-enabled` is true and if `-server.log-source-ips-header` is set. If not set the default Forwarded, X-Real-IP or X-Forwarded-For headers are searched. + Regular expression for matching the source IPs. It should contain at least one capturing group, the first of which will be returned. Only used if `-server.log-source-ips-enabled` is true and if `-server.log-source-ips-header` is set. If not set, the default Forwarded, X-Real-IP or X-Forwarded-For headers are searched. diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index 0ce98cb65a..a642c4f324 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -102,6 +102,10 @@ api: # CLI flag: -api.http-request-headers-to-log [http_request_headers_to_log: | default = []] + # HTTP header that can be used as request id + # CLI flag: -api.request-id-header + [request_id_header: | default = ""] + # Regex for CORS origin. It is fully anchored. Example: # 'https?://(domain1|domain2)\.com' # CLI flag: -server.cors-origin @@ -162,6 +166,110 @@ api: # The compactor_config configures the compactor for the blocks storage. [compactor: ] +parquet_converter: + # Maximum concurrent goroutines for downloading block metadata from object + # storage. 
+ # CLI flag: -parquet-converter.meta-sync-concurrency
+ [meta_sync_concurrency: | default = 20]
+
+ # How often to check for new TSDB blocks to convert to parquet format.
+ # CLI flag: -parquet-converter.conversion-interval
+ [conversion_interval: | default = 1m]
+
+ # Maximum number of time series per parquet row group. Larger values improve
+ # compression but may reduce performance during reads.
+ # CLI flag: -parquet-converter.max-rows-per-row-group
+ [max_rows_per_row_group: | default = 1000000]
+
+ # Enable disk-based write buffering to reduce memory consumption during
+ # parquet file generation.
+ # CLI flag: -parquet-converter.file-buffer-enabled
+ [file_buffer_enabled: | default = true]
+
+ # Local directory path for caching TSDB blocks during parquet conversion.
+ # CLI flag: -parquet-converter.data-dir
+ [data_dir: | default = "./data"]
+
+ ring:
+ kvstore:
+ # Backend storage to use for the ring. Supported values are: consul,
+ # dynamodb, etcd, inmemory, memberlist, multi.
+ # CLI flag: -parquet-converter.ring.store
+ [store: | default = "consul"]
+
+ # The prefix for the keys in the store. Should end with a /.
+ # CLI flag: -parquet-converter.ring.prefix
+ [prefix: | default = "collectors/"]
+
+ # The consul_config configures the consul client.
+ # The CLI flags prefix for this block config is: parquet-converter.ring
+ [consul: ]
+
+ dynamodb:
+ # Region to access dynamodb.
+ # CLI flag: -parquet-converter.ring.dynamodb.region
+ [region: | default = ""]
+
+ # Table name to use on dynamodb.
+ # CLI flag: -parquet-converter.ring.dynamodb.table-name
+ [table_name: | default = ""]
+
+ # Time to expire items on dynamodb.
+ # CLI flag: -parquet-converter.ring.dynamodb.ttl-time
+ [ttl: | default = 0s]
+
+ # Time to refresh local ring with information on dynamodb.
+ # CLI flag: -parquet-converter.ring.dynamodb.puller-sync-time
+ [puller_sync_time: | default = 1m]
+
+ # Maximum number of retries for DDB KV CAS.
+ # CLI flag: -parquet-converter.ring.dynamodb.max-cas-retries
+ [max_cas_retries: | default = 10]
+
+ # Timeout of dynamoDbClient requests. Default is 2m.
+ # CLI flag: -parquet-converter.ring.dynamodb.timeout
+ [timeout: | default = 2m]
+
+ # The etcd_config configures the etcd client.
+ # The CLI flags prefix for this block config is: parquet-converter.ring
+ [etcd: ]
+
+ multi:
+ # Primary backend storage used by multi-client.
+ # CLI flag: -parquet-converter.ring.multi.primary
+ [primary: | default = ""]
+
+ # Secondary backend storage used by multi-client.
+ # CLI flag: -parquet-converter.ring.multi.secondary
+ [secondary: | default = ""]
+
+ # Mirror writes to secondary store.
+ # CLI flag: -parquet-converter.ring.multi.mirror-enabled
+ [mirror_enabled: | default = false]
+
+ # Timeout for storing value to secondary store.
+ # CLI flag: -parquet-converter.ring.multi.mirror-timeout
+ [mirror_timeout: | default = 2s]
+
+ # Period at which to heartbeat to the ring. 0 = disabled.
+ # CLI flag: -parquet-converter.ring.heartbeat-period
+ [heartbeat_period: | default = 5s]
+
+ # The heartbeat timeout after which parquet-converters are considered
+ # unhealthy within the ring. 0 = never (timeout disabled).
+ # CLI flag: -parquet-converter.ring.heartbeat-timeout
+ [heartbeat_timeout: | default = 1m]
+
+ # Time since last heartbeat before the parquet-converter will be removed from
+ # the ring. 0 to disable.
+ # CLI flag: -parquet-converter.auto-forget-delay
+ [auto_forget_delay: | default = 2m]
+
+ # File path where tokens are stored.
If empty, tokens are not stored at + # shutdown and restored at startup. + # CLI flag: -parquet-converter.ring.tokens-file-path + [tokens_file_path: | default = ""] + # The store_gateway_config configures the store-gateway service used by the # blocks storage. [store_gateway: ] @@ -343,8 +451,8 @@ The `alertmanager_config` configures the Cortex alertmanager. sharding_ring: # The key-value store used to share the hash ring across multiple instances. kvstore: - # Backend storage to use for the ring. Supported values are: consul, etcd, - # inmemory, memberlist, multi. + # Backend storage to use for the ring. Supported values are: consul, + # dynamodb, etcd, inmemory, memberlist, multi. # CLI flag: -alertmanager.sharding-ring.store [store: | default = "consul"] @@ -352,6 +460,10 @@ sharding_ring: # CLI flag: -alertmanager.sharding-ring.prefix [prefix: | default = "alertmanagers/"] + # The consul_config configures the consul client. + # The CLI flags prefix for this block config is: alertmanager.sharding-ring + [consul: ] + dynamodb: # Region to access dynamodb. # CLI flag: -alertmanager.sharding-ring.dynamodb.region @@ -377,10 +489,6 @@ sharding_ring: # CLI flag: -alertmanager.sharding-ring.dynamodb.timeout [timeout: | default = 2m] - # The consul_config configures the consul client. - # The CLI flags prefix for this block config is: alertmanager.sharding-ring - [consul: ] - # The etcd_config configures the etcd client. # The CLI flags prefix for this block config is: alertmanager.sharding-ring [etcd: ] @@ -425,6 +533,12 @@ sharding_ring: # CLI flag: -alertmanager.sharding-ring.tokens-file-path [tokens_file_path: | default = ""] + # Set to true to enable ring detailed metrics. These metrics provide detailed + # information, such as token count and ownership per tenant. Disabling them + # can significantly decrease the number of metrics emitted. + # CLI flag: -alertmanager.sharding-ring.detailed-metrics-enabled + [detailed_metrics_enabled: | default = true] + # The sleep seconds when alertmanager is shutting down. Need to be close to or # larger than KV Store information propagation delay # CLI flag: -alertmanager.sharding-ring.final-sleep @@ -1974,6 +2088,252 @@ bucket_store: # CLI flag: -blocks-storage.bucket-store.metadata-cache.partitioned-groups-list-ttl [partitioned_groups_list_ttl: | default = 0s] + parquet_labels_cache: + # The parquet labels cache backend type. Single or Multiple cache backend + # can be provided. Supported values in single cache: memcached, redis, + # inmemory, and '' (disable). Supported values in multi level cache: a + # comma-separated list of (inmemory, memcached, redis) + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.backend + [backend: | default = ""] + + inmemory: + # Maximum size in bytes of in-memory parquet-labels cache used (shared + # between all tenants). + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.inmemory.max-size-bytes + [max_size_bytes: | default = 1073741824] + + memcached: + # Comma separated list of memcached addresses. Supported prefixes are: + # dns+ (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query, + # dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after + # that). + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.addresses + [addresses: | default = ""] + + # The socket read/write timeout. 
+ # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.timeout + [timeout: | default = 100ms] + + # The maximum number of idle connections that will be maintained per + # address. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-idle-connections + [max_idle_connections: | default = 16] + + # The maximum number of concurrent asynchronous operations that can occur. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-async-concurrency + [max_async_concurrency: | default = 3] + + # The maximum number of enqueued asynchronous operations allowed. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-async-buffer-size + [max_async_buffer_size: | default = 10000] + + # The maximum number of concurrent connections running get operations. If + # set to 0, concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-get-multi-concurrency + [max_get_multi_concurrency: | default = 100] + + # The maximum number of keys a single underlying get operation should fetch. + # If more keys are specified, internally keys are split into multiple + # batches and fetched concurrently, honoring the max concurrency. If set + # to 0, the max batch size is unlimited. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-get-multi-batch-size + [max_get_multi_batch_size: | default = 0] + + # The maximum size of an item stored in memcached. Bigger items are not + # stored. If set to 0, no maximum size is enforced. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-item-size + [max_item_size: | default = 1048576] + + # Use the memcached auto-discovery mechanism provided by some cloud + # providers, like GCP and AWS. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.auto-discovery + [auto_discovery: | default = false] + + set_async_circuit_breaker_config: + # If true, enable circuit breaker. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.enabled + [enabled: | default = false] + + # Maximum number of requests allowed to pass through when the circuit + # breaker is half-open. If set to 0, by default it allows 1 request. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.half-open-max-requests + [half_open_max_requests: | default = 10] + + # Period of the open state after which the state of the circuit breaker + # becomes half-open. If set to 0, by default open duration is 60 + # seconds. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.open-duration + [open_duration: | default = 5s] + + # Minimum number of requests to trigger the circuit breaker. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.min-requests + [min_requests: | default = 50] + + # Consecutive failures to determine if the circuit breaker should open. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.consecutive-failures + [consecutive_failures: | default = 5] + + # Failure percentage to determine if the circuit breaker should open. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.failure-percent + [failure_percent: | default = 0.05] + + redis: + # Comma separated list of redis addresses.
Supported prefixes are: dns+ + # (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query), + # dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after + # that). + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.addresses + [addresses: | default = ""] + + # Redis username. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.username + [username: | default = ""] + + # Redis password. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.password + [password: | default = ""] + + # Database to be selected after connecting to the server. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.db + [db: | default = 0] + + # Specifies the master's name. Must not be empty for Redis Sentinel. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.master-name + [master_name: | default = ""] + + # The maximum number of concurrent GetMulti() operations. If set to 0, + # concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.max-get-multi-concurrency + [max_get_multi_concurrency: | default = 100] + + # The maximum size per batch for mget. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.get-multi-batch-size + [get_multi_batch_size: | default = 100] + + # The maximum number of concurrent SetMulti() operations. If set to 0, + # concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.max-set-multi-concurrency + [max_set_multi_concurrency: | default = 100] + + # The maximum size per batch for pipeline set. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-multi-batch-size + [set_multi_batch_size: | default = 100] + + # The maximum number of concurrent asynchronous operations that can occur. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.max-async-concurrency + [max_async_concurrency: | default = 3] + + # The maximum number of enqueued asynchronous operations allowed. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.max-async-buffer-size + [max_async_buffer_size: | default = 10000] + + # Client dial timeout. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.dial-timeout + [dial_timeout: | default = 5s] + + # Client read timeout. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.read-timeout + [read_timeout: | default = 3s] + + # Client write timeout. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.write-timeout + [write_timeout: | default = 3s] + + # Whether to enable tls for redis connection. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-enabled + [tls_enabled: | default = false] + + # Path to the client certificate file, which will be used for + # authenticating with the server. Also requires the key path to be + # configured. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-cert-path + [tls_cert_path: | default = ""] + + # Path to the key file for the client certificate. Also requires the + # client certificate to be configured. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-key-path + [tls_key_path: | default = ""] + + # Path to the CA certificates file to validate server certificate against. + # If not set, the host's root CA certificates are used.
+ # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-ca-path + [tls_ca_path: | default = ""] + + # Override the expected name on the server certificate. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-server-name + [tls_server_name: | default = ""] + + # Skip validating server certificate. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-insecure-skip-verify + [tls_insecure_skip_verify: | default = false] + + # If not zero, client-side caching is enabled. With client-side caching, + # data is stored in memory instead of being fetched each time. See + # https://redis.io/docs/manual/client-side-caching/ for more info. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.cache-size + [cache_size: | default = 0] + + set_async_circuit_breaker_config: + # If true, enable circuit breaker. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.enabled + [enabled: | default = false] + + # Maximum number of requests allowed to pass through when the circuit + # breaker is half-open. If set to 0, by default it allows 1 request. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.half-open-max-requests + [half_open_max_requests: | default = 10] + + # Period of the open state after which the state of the circuit breaker + # becomes half-open. If set to 0, by default open duration is 60 + # seconds. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.open-duration + [open_duration: | default = 5s] + + # Minimum number of requests to trigger the circuit breaker. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.min-requests + [min_requests: | default = 50] + + # Consecutive failures to determine if the circuit breaker should open. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.consecutive-failures + [consecutive_failures: | default = 5] + + # Failure percentage to determine if the circuit breaker should open. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.failure-percent + [failure_percent: | default = 0.05] + + multilevel: + # The maximum number of concurrent asynchronous operations that can occur + # when backfilling cache items. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.multilevel.max-async-concurrency + [max_async_concurrency: | default = 3] + + # The maximum number of enqueued asynchronous operations allowed when + # backfilling cache items. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.multilevel.max-async-buffer-size + [max_async_buffer_size: | default = 10000] + + # The maximum number of items to backfill per asynchronous operation. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.multilevel.max-backfill-items + [max_backfill_items: | default = 10000] + + # Size of each subrange that the bucket object is split into for better caching. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.subrange-size + [subrange_size: | default = 16000] + + # Maximum number of sub-GetRange requests that a single GetRange request can + # be split into when fetching the parquet labels file. Zero or negative value = + # unlimited number of sub-requests.
+ # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.max-get-range-requests + [max_get_range_requests: | default = 3] + + # TTL for caching object attributes for the parquet labels file. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.attributes-ttl + [attributes_ttl: | default = 168h] + + # TTL for caching individual subranges. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.subrange-ttl + [subrange_ttl: | default = 24h] + # Maximum number of entries in the regex matchers cache. 0 to disable. # CLI flag: -blocks-storage.bucket-store.matchers-cache-max-items [matchers_cache_max_items: | default = 0] @@ -2350,8 +2710,8 @@ The `compactor_config` configures the compactor for the blocks storage. sharding_ring: kvstore: - # Backend storage to use for the ring. Supported values are: consul, etcd, - # inmemory, memberlist, multi. + # Backend storage to use for the ring. Supported values are: consul, + # dynamodb, etcd, inmemory, memberlist, multi. # CLI flag: -compactor.ring.store [store: | default = "consul"] @@ -2359,6 +2719,10 @@ sharding_ring: # CLI flag: -compactor.ring.prefix [prefix: | default = "collectors/"] + # The consul_config configures the consul client. + # The CLI flags prefix for this block config is: compactor.ring + [consul: ] + dynamodb: # Region to access dynamodb. # CLI flag: -compactor.ring.dynamodb.region @@ -2384,10 +2748,6 @@ sharding_ring: # CLI flag: -compactor.ring.dynamodb.timeout [timeout: | default = 2m] - # The consul_config configures the consul client. - # The CLI flags prefix for this block config is: compactor.ring - [consul: ] - # The etcd_config configures the etcd client. # The CLI flags prefix for this block config is: compactor.ring [etcd: ] @@ -2423,6 +2783,12 @@ sharding_ring: # CLI flag: -compactor.auto-forget-delay [auto_forget_delay: | default = 2m] + # Set to true to enable ring detailed metrics. These metrics provide detailed + # information, such as token count and ownership per tenant. Disabling them + # can significantly decrease the number of metrics emitted. + # CLI flag: -compactor.ring.detailed-metrics-enabled + [detailed_metrics_enabled: | default = true] + # Minimum time to wait for ring stability at startup. 0 to disable. # CLI flag: -compactor.ring.wait-stability-min-duration [wait_stability_min_duration: | default = 1m] @@ -2573,6 +2939,7 @@ The `consul_config` configures the consul client. The supported CLI flags ` | default = "consul"] @@ -2681,6 +3048,10 @@ ha_tracker: # CLI flag: -distributor.ha-tracker.prefix [prefix: | default = "ha-tracker/"] + # The consul_config configures the consul client. + # The CLI flags prefix for this block config is: distributor.ha-tracker + [consul: ] + dynamodb: # Region to access dynamodb. # CLI flag: -distributor.ha-tracker.dynamodb.region @@ -2706,10 +3077,6 @@ ha_tracker: # CLI flag: -distributor.ha-tracker.dynamodb.timeout [timeout: | default = 2m] - # The consul_config configures the consul client. - # The CLI flags prefix for this block config is: distributor.ha-tracker - [consul: ] - # The etcd_config configures the etcd client. # The CLI flags prefix for this block config is: distributor.ha-tracker [etcd: ] @@ -2773,10 +3140,15 @@ ha_tracker: # CLI flag: -distributor.use-stream-push [use_stream_push: | default = false] +# EXPERIMENTAL: If true, accept Prometheus remote write v2 protocol push +# requests. +# CLI flag: -distributor.remote-writev2-enabled +[remote_writev2_enabled: | default = false] + ring: kvstore: - # Backend storage to use for the ring.
Supported values are: consul, etcd, - # inmemory, memberlist, multi. + # Backend storage to use for the ring. Supported values are: consul, + # dynamodb, etcd, inmemory, memberlist, multi. # CLI flag: -distributor.ring.store [store: | default = "consul"] @@ -2784,6 +3156,10 @@ ring: # CLI flag: -distributor.ring.prefix [prefix: | default = "collectors/"] + # The consul_config configures the consul client. + # The CLI flags prefix for this block config is: distributor.ring + [consul: ] + dynamodb: # Region to access dynamodb. # CLI flag: -distributor.ring.dynamodb.region @@ -2809,10 +3185,6 @@ ring: # CLI flag: -distributor.ring.dynamodb.timeout [timeout: | default = 2m] - # The consul_config configures the consul client. - # The CLI flags prefix for this block config is: distributor.ring - [consul: ] - # The etcd_config configures the etcd client. # The CLI flags prefix for this block config is: distributor.ring [etcd: ] @@ -2843,6 +3215,12 @@ ring: # CLI flag: -distributor.ring.heartbeat-timeout [heartbeat_timeout: | default = 1m] + # Set to true to enable ring detailed metrics. These metrics provide detailed + # information, such as token count and ownership per tenant. Disabling them + # can significantly decrease the number of metrics emitted. + # CLI flag: -distributor.ring.detailed-metrics-enabled + [detailed_metrics_enabled: | default = true] + # Name of network interface to read address from. # CLI flag: -distributor.ring.instance-interface-names [instance_interface_names: | default = [eth0 en0]] @@ -2883,6 +3261,15 @@ otlp: # https://github.com/prometheus/OpenMetrics/blob/main/specification/OpenMetrics.md#supporting-target-metadata-in-both-push-based-and-pull-based-systems) # CLI flag: -distributor.otlp.disable-target-info [disable_target_info: | default = false] + + # EXPERIMENTAL: If true, allow delta temporality OTLP metrics to be ingested. + # CLI flag: -distributor.otlp.allow-delta-temporality + [allow_delta_temporality: | default = false] + + # EXPERIMENTAL: If true, the '__type__' and '__unit__' labels are added to + # OTLP metrics. + # CLI flag: -distributor.otlp.enable-type-and-unit-labels + [enable_type_and_unit_labels: | default = false] ``` ### `etcd_config` The `etcd_config` configures the etcd client. The supported CLI flags `` - `compactor.ring` - `distributor.ha-tracker` - `distributor.ring` +- `parquet-converter.ring` - `ruler.ring` - `store-gateway.sharding-ring` @@ -3097,6 +3485,10 @@ grpc_client_config: # using default gRPC client connect timeout 20s. # CLI flag: -querier.frontend-client.connect-timeout [connect_timeout: | default = 5s] + +# Name of network interface to read address from. +# CLI flag: -querier.instance-interface-names +[instance_interface_names: | default = [eth0 en0]] ``` ### `ingester_config` The `ingester_config` configures the Cortex ingester. ``` lifecycler: ring: kvstore: - # Backend storage to use for the ring. Supported values are: consul, etcd, - # inmemory, memberlist, multi. + # Backend storage to use for the ring. Supported values are: consul, + # dynamodb, etcd, inmemory, memberlist, multi. # CLI flag: -ring.store [store: | default = "consul"] @@ -3116,6 +3508,9 @@ lifecycler: # CLI flag: -ring.prefix [prefix: | default = "collectors/"] + # The consul_config configures the consul client. + [consul: ] + dynamodb: # Region to access dynamodb.
# CLI flag: -dynamodb.region @@ -3141,9 +3536,6 @@ lifecycler: # CLI flag: -dynamodb.timeout [timeout: | default = 2m] - # The consul_config configures the consul client. - [consul: ] - # The etcd_config configures the etcd client. [etcd: ] @@ -3343,12 +3735,6 @@ instance_limits: query_protection: rejection: - # EXPERIMENTAL: Enable query rejection feature, where the component return - # 503 to all incoming query requests when the configured thresholds are - # breached. - # CLI flag: -ingester.query-protection.rejection.enabled - [enabled: | default = false] - threshold: # EXPERIMENTAL: Max CPU utilization that this ingester can reach before # rejecting new query request (across all tenants) in percentage, between @@ -3731,10 +4117,31 @@ The `limits_config` configures default and per-tenant limits imposed by Cortex s # CLI flag: -frontend.max-queriers-per-tenant [max_queriers_per_tenant: | default = 0] +# [Experimental] Number of shards to use when distributing shardable PromQL +# queries. +# CLI flag: -frontend.query-vertical-shard-size +[query_vertical_shard_size: | default = 0] + # Enable to allow queries to be evaluated with data from a single zone, if other # zones are not available. [query_partial_data: | default = false] +# The maximum number of rows that can be fetched when querying parquet storage. +# Each row maps to a series in a parquet file. This limit applies before +# materializing chunks. 0 to disable. +# CLI flag: -querier.parquet-queryable.max-fetched-row-count +[parquet_max_fetched_row_count: | default = 0] + +# The maximum number of bytes that can be used to fetch chunk column pages when +# querying parquet storage. 0 to disable. +# CLI flag: -querier.parquet-queryable.max-fetched-chunk-bytes +[parquet_max_fetched_chunk_bytes: | default = 0] + +# The maximum number of bytes that can be used to fetch all column pages when +# querying parquet storage. 0 to disable. +# CLI flag: -querier.parquet-queryable.max-fetched-data-bytes +[parquet_max_fetched_data_bytes: | default = 0] + # Maximum number of outstanding requests per tenant per request queue (either # query frontend or query scheduler); requests beyond this error with HTTP 429. # CLI flag: -frontend.max-outstanding-requests-per-tenant @@ -3833,6 +4240,23 @@ query_rejection: # CLI flag: -compactor.partition-series-count [compactor_partition_series_count: | default = 0] +# If set, enables the Parquet converter to create the parquet files. +# CLI flag: -parquet-converter.enabled +[parquet_converter_enabled: | default = false] + +# The default tenant's shard size when the shuffle-sharding strategy is used by +# the parquet converter. When this setting is specified in the per-tenant +# overrides, a value of 0 disables shuffle sharding for the tenant. If the value +# is < 1 and > 0 the shard size will be a percentage of the total parquet +# converters. +# CLI flag: -parquet-converter.tenant-shard-size +[parquet_converter_tenant_shard_size: | default = 0] + +# Additional label names for specific tenants to sort by after metric name, in +# order of precedence. These are applied during Parquet file generation. +# CLI flag: -parquet-converter.sort-columns +[parquet_converter_sort_columns: | default = []] + # S3 server-side encryption type. Required to enable server-side encryption # overrides for a specific tenant. If not set, the default S3 client settings # are used. @@ -4177,7 +4601,7 @@ The `querier_config` configures the Cortex querier. 
[per_step_stats_enabled: | default = false] # Use compression for metrics query API or instant and range query APIs. -# Supports 'gzip' and '' (disable compression) +# Supported compression types: 'gzip', 'snappy', 'zstd' and '' (disable compression) # CLI flag: -querier.response-compression [response_compression: | default = "gzip"] @@ -4328,6 +4752,29 @@ thanos_engine: # [Experimental] If true, experimental promQL functions are enabled. # CLI flag: -querier.enable-promql-experimental-functions [enable_promql_experimental_functions: | default = false] + +# [Experimental] If true, the querier will try to query the parquet files if +# available. +# CLI flag: -querier.enable-parquet-queryable +[enable_parquet_queryable: | default = false] + +# [Experimental] Maximum size of the Parquet queryable shard cache. 0 to +# disable. +# CLI flag: -querier.parquet-queryable-shard-cache-size +[parquet_queryable_shard_cache_size: | default = 512] + +# [Experimental] Parquet queryable's default block store to query. Valid options +# are tsdb and parquet. If it is set to tsdb, the parquet queryable always +# falls back to the store gateway. +# CLI flag: -querier.parquet-queryable-default-block-store +[parquet_queryable_default_block_store: | default = "parquet"] + +# [Experimental] Disable the Parquet queryable fallback of queries to the Store +# Gateway when a block is not available as Parquet files but is available in +# TSDB. Setting this to true disables the fallback, allowing the Store Gateway +# to be removed, but you must make sure Parquet files are created before they +# are queried. +# CLI flag: -querier.parquet-queryable-fallback-disabled +[parquet_queryable_fallback_disabled: | default = false] ``` ### `query_frontend_config` @@ -4890,8 +5337,8 @@ alertmanager_client: ring: kvstore: - # Backend storage to use for the ring. Supported values are: consul, etcd, - # inmemory, memberlist, multi. + # Backend storage to use for the ring. Supported values are: consul, + # dynamodb, etcd, inmemory, memberlist, multi. # CLI flag: -ruler.ring.store [store: | default = "consul"] @@ -4899,6 +5346,10 @@ ring: # CLI flag: -ruler.ring.prefix [prefix: | default = "rulers/"] + # The consul_config configures the consul client. + # The CLI flags prefix for this block config is: ruler.ring + [consul: ] + dynamodb: # Region to access dynamodb. # CLI flag: -ruler.ring.dynamodb.region @@ -4924,10 +5375,6 @@ ring: # CLI flag: -ruler.ring.dynamodb.timeout [timeout: | default = 2m] - # The consul_config configures the consul client. - # The CLI flags prefix for this block config is: ruler.ring - [consul: ] - # The etcd_config configures the etcd client. # The CLI flags prefix for this block config is: ruler.ring [etcd: ] @@ -4973,6 +5420,12 @@ ring: # CLI flag: -ruler.ring.tokens-file-path [tokens_file_path: | default = ""] + # Set to true to enable ring detailed metrics. These metrics provide detailed + # information, such as token count and ownership per tenant. Disabling them + # can significantly decrease the number of metrics emitted. + # CLI flag: -ruler.ring.detailed-metrics-enabled + [detailed_metrics_enabled: | default = true] + # Name of network interface to read address from. # CLI flag: -ruler.ring.instance-interface-names [instance_interface_names: | default = [eth0 en0]] @@ -5831,6 +6284,11 @@ grpc_tls_config: # CLI flag: -server.grpc.keepalive.ping-without-stream-allowed [grpc_server_ping_without_stream_allowed: | default = true] +# Enable Channelz for gRPC server.
A web UI will also be exposed on the HTTP +# server at /channelz +# CLI flag: -server.enable-channelz +[enable_channelz: | default = false] + # Output log messages in the given format. Valid formats: [logfmt, json] # CLI flag: -log.format [log_format: | default = "logfmt"] @@ -5902,8 +6360,8 @@ sharding_ring: # This option needs be set both on the store-gateway and querier when running # in microservices mode. kvstore: - # Backend storage to use for the ring. Supported values are: consul, etcd, - # inmemory, memberlist, multi. + # Backend storage to use for the ring. Supported values are: consul, + # dynamodb, etcd, inmemory, memberlist, multi. # CLI flag: -store-gateway.sharding-ring.store [store: | default = "consul"] @@ -5911,6 +6369,10 @@ sharding_ring: # CLI flag: -store-gateway.sharding-ring.prefix [prefix: | default = "collectors/"] + # The consul_config configures the consul client. + # The CLI flags prefix for this block config is: store-gateway.sharding-ring + [consul: ] + dynamodb: # Region to access dynamodb. # CLI flag: -store-gateway.sharding-ring.dynamodb.region @@ -5936,10 +6398,6 @@ sharding_ring: # CLI flag: -store-gateway.sharding-ring.dynamodb.timeout [timeout: | default = 2m] - # The consul_config configures the consul client. - # The CLI flags prefix for this block config is: store-gateway.sharding-ring - [consul: ] - # The etcd_config configures the etcd client. # The CLI flags prefix for this block config is: store-gateway.sharding-ring [etcd: ] @@ -5992,6 +6450,12 @@ sharding_ring: # CLI flag: -store-gateway.sharding-ring.keep-instance-in-the-ring-on-shutdown [keep_instance_in_the_ring_on_shutdown: | default = false] + # Set to true to enable ring detailed metrics. These metrics provide detailed + # information, such as token count and ownership per tenant. Disabling them + # can significantly decrease the number of metrics emitted. + # CLI flag: -store-gateway.sharding-ring.detailed-metrics-enabled + [detailed_metrics_enabled: | default = true] + # Minimum time to wait for ring stability at startup. 0 to disable. # CLI flag: -store-gateway.sharding-ring.wait-stability-min-duration [wait_stability_min_duration: | default = 1m] @@ -6039,12 +6503,6 @@ sharding_ring: query_protection: rejection: - # EXPERIMENTAL: Enable query rejection feature, where the component return - # 503 to all incoming query requests when the configured thresholds are - # breached. - # CLI flag: -store-gateway.query-protection.rejection.enabled - [enabled: | default = false] - threshold: # EXPERIMENTAL: Max CPU utilization that this ingester can reach before # rejecting new query request (across all tenants) in percentage, between diff --git a/docs/configuration/single-process-config-blocks-gossip-1.yaml b/docs/configuration/single-process-config-blocks-gossip-1.yaml index 7c7b3b515a..bf020fd9c9 100644 --- a/docs/configuration/single-process-config-blocks-gossip-1.yaml +++ b/docs/configuration/single-process-config-blocks-gossip-1.yaml @@ -1,4 +1,4 @@ - +# yaml-language-server: $schema=https://raw.githubusercontent.com/cortexproject/cortex/master/schemas/cortex-config-schema.json # Configuration for running Cortex in single-process mode. # This should not be used in production. It is only for getting started # and development.
diff --git a/docs/configuration/single-process-config-blocks-gossip-2.yaml b/docs/configuration/single-process-config-blocks-gossip-2.yaml index 54dbf79548..44238df4fd 100644 --- a/docs/configuration/single-process-config-blocks-gossip-2.yaml +++ b/docs/configuration/single-process-config-blocks-gossip-2.yaml @@ -1,4 +1,4 @@ - +# yaml-language-server: $schema=https://raw.githubusercontent.com/cortexproject/cortex/master/schemas/cortex-config-schema.json # Configuration for running Cortex in single-process mode. # This should not be used in production. It is only for getting started # and development. diff --git a/docs/configuration/single-process-config-blocks-local.yaml b/docs/configuration/single-process-config-blocks-local.yaml index c6b97ae0ed..d6887dd753 100644 --- a/docs/configuration/single-process-config-blocks-local.yaml +++ b/docs/configuration/single-process-config-blocks-local.yaml @@ -1,4 +1,4 @@ - +# yaml-language-server: $schema=https://raw.githubusercontent.com/cortexproject/cortex/master/schemas/cortex-config-schema.json # Configuration for running Cortex in single-process mode. # This should not be used in production. It is only for getting started # and development. diff --git a/docs/configuration/single-process-config-blocks-tls.yaml b/docs/configuration/single-process-config-blocks-tls.yaml index 352bf7c8a0..8e18ce9bf5 100644 --- a/docs/configuration/single-process-config-blocks-tls.yaml +++ b/docs/configuration/single-process-config-blocks-tls.yaml @@ -1,4 +1,4 @@ - +# yaml-language-server: $schema=https://raw.githubusercontent.com/cortexproject/cortex/master/schemas/cortex-config-schema.json # Configuration for running Cortex in single-process mode. # This should not be used in production. It is only for getting started # and development. diff --git a/docs/configuration/v1-guarantees.md b/docs/configuration/v1-guarantees.md index 700fbf5beb..8825b19e2a 100644 --- a/docs/configuration/v1-guarantees.md +++ b/docs/configuration/v1-guarantees.md @@ -59,6 +59,7 @@ Currently experimental features are: - Distributor: - Do not extend writes on unhealthy ingesters (`-distributor.extend-writes=false`) - Accept multiple HA pairs in the same request (enabled via `-experimental.distributor.ha-tracker.mixed-ha-samples=true`) + - Accept Prometheus remote write 2.0 requests (`-distributor.remote-writev2-enabled=true`) - Tenant Deletion in Purger, for blocks storage. - Query-frontend: query stats tracking (`-frontend.query-stats-enabled`) - Blocks storage bucket index @@ -116,6 +117,8 @@ Currently experimental features are: - `store-gateway.sharding-ring.final-sleep` (duration) CLI flag - `alertmanager-sharding-ring.final-sleep` (duration) CLI flag - OTLP Receiver + - Ingest delta temporality OTLP metrics (`-distributor.otlp.allow-delta-temporality=true`) + - Add `__type__` and `__unit__` labels (`-distributor.otlp.enable-type-and-unit-labels`) - Persistent tokens in the Ruler Ring: - `-ruler.ring.tokens-file-path` (path) CLI flag - Native Histograms diff --git a/docs/getting-started/cortex-config.yaml b/docs/getting-started/cortex-config.yaml index 6795ca6a2f..1b24084ad3 100644 --- a/docs/getting-started/cortex-config.yaml +++ b/docs/getting-started/cortex-config.yaml @@ -1,3 +1,4 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/cortexproject/cortex/master/schemas/cortex-config-schema.json # Configuration for running Cortex in single-process mode. # This should not be used in production. It is only for getting started # and development.
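The `yaml-language-server` header added across these example configs points YAML-aware editors at the published JSON schema, enabling validation and autocompletion of Cortex options. As a sketch of how an opted-in config file starts (the options below are illustrative; any valid Cortex config works after the directive):

```yaml
# yaml-language-server: $schema=https://raw.githubusercontent.com/cortexproject/cortex/master/schemas/cortex-config-schema.json

# Illustrative top-level options; the schema directive above is the only
# line required for editor validation to kick in.
auth_enabled: false

server:
  http_listen_port: 9009
```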
diff --git a/docs/guides/open-telemetry-collector.md b/docs/guides/open-telemetry-collector.md index 40d2aa83bf..cd0e9bd7ad 100644 --- a/docs/guides/open-telemetry-collector.md +++ b/docs/guides/open-telemetry-collector.md @@ -1,8 +1,8 @@ --- -title: "OpenTelemetry Collector" -linkTitle: "OpenTelemetry Collector" +title: "Use OpenTelemetry Collector to send metrics to Cortex" +linkTitle: "Use OpenTelemetry Collector to send metrics to Cortex" weight: 10 -slug: opentelemetry-collector +slug: use-opentelemetry-collector-to-send-metrics-to-cortex --- This guide explains how to configure open-telemetry collector and OTLP(OpenTelemetry Protocol) configurations in the @@ -64,9 +64,22 @@ service: exporters: [otlphttp] ``` -## Configure OTLP +## Cortex configurations for ingesting OTLP metrics +You can configure OTLP-related flags in the config file. -### target_info metric +``` +limits: + promote_resource_attributes: +... +distributor: + otlp: + convert_all_attributes: + disable_target_info: + allow_delta_temporality: + enable_type_and_unit_labels: +``` + +### Ingest `target_info` metric By default, the [target_info](https://github.com/prometheus/OpenMetrics/blob/main/specification/OpenMetrics.md#supporting-target-metadata-in-both-push-based-and-pull-based-systems) @@ -117,6 +130,17 @@ distributor: disable_target_info: false ``` +### Ingest delta temporality OTLP metrics + +OpenTelemetry supports two temporalities, [Delta and Cumulative](https://opentelemetry.io/docs/specs/otel/metrics/data-model/#temporality). +By default, only cumulative metrics can be ingested via the OTLP endpoint in Cortex. +To enable the ingestion of OTLP metrics with delta temporality, set the `-distributor.otlp.allow-delta-temporality` flag to `true`. + +### Enable `__type__` and `__unit__` labels + +The `__type__` and `__unit__` labels are added to OTLP metrics if `-distributor.otlp.enable-type-and-unit-labels` is set to `true`. +This flag is disabled by default. + ### Configure promote resource attributes per tenants The `promote_resource_attributes` is a [runtime config](./overrides-exporter.md) so you can configure it per tenant. diff --git a/docs/guides/parquet-mode.md b/docs/guides/parquet-mode.md new file mode 100644 index 0000000000..c68dd691ba --- /dev/null +++ b/docs/guides/parquet-mode.md @@ -0,0 +1,310 @@ +--- +title: "Parquet Mode" +linkTitle: "Parquet Mode" +weight: 11 +slug: parquet-mode +--- + +## Overview + +Parquet mode in Cortex provides an experimental feature that converts TSDB blocks to Parquet format for improved query performance and storage efficiency on older data. This feature is particularly beneficial for long-term storage scenarios where data is accessed less frequently but needs to be queried efficiently. + +The parquet mode consists of two main components: +- **Parquet Converter**: Converts TSDB blocks to Parquet format +- **Parquet Queryable**: Enables querying of Parquet files with fallback to TSDB blocks + +## Why Parquet Mode?
+ +Traditional TSDB format and Store Gateway architecture face significant challenges when dealing with long-term data storage on object storage: + +### TSDB Format Limitations +- **Random Read Intensive**: The TSDB index relies heavily on random reads, where each read becomes a separate request to object storage +- **Overfetching**: To reduce object storage requests, data that is close together is merged into a single request, leading to higher bandwidth usage and overfetching +- **High Cardinality Bottlenecks**: Index postings can become a major bottleneck for high cardinality data + +### Store Gateway Operational Challenges +- **Resource Intensive**: Requires significant local disk space for index headers and high memory usage +- **Complex State Management**: Requires complex data sharding when scaling, which often leads to consistency and availability issues, as well as long startup times +- **Query Inefficiencies**: Single-threaded block processing leads to high latency for large blocks + +### Parquet Advantages +[Apache Parquet](https://parquet.apache.org/) addresses these challenges through: +- **Columnar Storage**: Data organized by columns reduces object storage requests as only specific columns need to be fetched +- **Data Locality**: Series that are likely to be queried together are co-located to minimize I/O operations +- **Stateless Design**: Rich file metadata eliminates the need for local state like index headers +- **Advanced Compression**: Reduces storage costs and improves query performance +- **Parallel Processing**: Row groups enable parallel processing for better scalability + +For more details on the design rationale, see the [Parquet Storage Proposal](../proposals/parquet-storage.md). + +## Architecture + +The parquet system works by: + +1. **Block Conversion**: The parquet converter runs periodically to identify TSDB blocks that should be converted to Parquet format +2. **Storage**: Parquet files are stored alongside TSDB blocks in object storage +3. **Querying**: The parquet queryable attempts to query Parquet files first, falling back to TSDB blocks when necessary +4.
**Marker System**: Conversion status is tracked using marker files to avoid duplicate conversions + +## Configuration + +### Enabling Parquet Converter + +To enable the parquet converter service, add it to your target list: + +```yaml +target: parquet-converter +``` + +Or include it in a multi-target deployment: + +```yaml +target: all,parquet-converter +``` + +### Parquet Converter Configuration + +Configure the parquet converter in your Cortex configuration: + +```yaml +parquet_converter: + # Data directory for caching blocks during conversion + data_dir: "./data" + + # Frequency of conversion job execution + conversion_interval: 1m + + # Maximum rows per parquet row group + max_rows_per_row_group: 1000000 + + # Number of concurrent meta file sync operations + meta_sync_concurrency: 20 + + # Enable file buffering to reduce memory usage + file_buffer_enabled: true + + # Ring configuration for distributed conversion + ring: + kvstore: + store: consul + consul: + host: localhost:8500 + heartbeat_period: 5s + heartbeat_timeout: 1m + instance_addr: 127.0.0.1 + instance_port: 9095 +``` + +### Per-Tenant Parquet Settings + +Enable parquet conversion per tenant using limits: + +```yaml +limits: + # Enable parquet converter for all tenants + parquet_converter_enabled: true + + # Shard size for shuffle sharding (0 = disabled) + parquet_converter_tenant_shard_size: 0.8 + + # Defines sort columns applied during Parquet file generation for specific tenants + parquet_converter_sort_columns: ["label1", "label2"] +``` + +You can also configure per-tenant settings using runtime configuration: + +```yaml +overrides: + tenant-1: + parquet_converter_enabled: true + parquet_converter_tenant_shard_size: 2 + parquet_converter_sort_columns: ["cluster", "namespace"] + tenant-2: + parquet_converter_enabled: false +``` + +### Enabling Parquet Queryable + +To enable querying of Parquet files, configure the querier: + +```yaml +querier: + # Enable parquet queryable with fallback (experimental) + enable_parquet_queryable: true + + # Cache size for parquet shards + parquet_queryable_shard_cache_size: 512 + + # Default block store: "tsdb" or "parquet" + parquet_queryable_default_block_store: "parquet" + + # Disable fallback to TSDB blocks when parquet files are not available + parquet_queryable_fallback_disabled: false +``` + +### Query Limits for Parquet + +Configure query limits specific to parquet operations: + +```yaml +limits: + # Maximum number of rows that can be scanned per query + parquet_max_fetched_row_count: 1000000 + + # Maximum chunk bytes per query + parquet_max_fetched_chunk_bytes: 100_000_000 # 100MB + + # Maximum data bytes per query + parquet_max_fetched_data_bytes: 1_000_000_000 # 1GB +``` + +### Cache Configuration + +Parquet mode supports dedicated caching for both chunks and labels to improve query performance. 
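+For a quick local test, pointing both caches at the in-memory backend is enough to see the effect. A minimal sketch (the backend choice here is illustrative; all other options keep their defaults):
+
+```yaml
+blocks_storage:
+  bucket_store:
+    chunks_cache:
+      backend: "inmemory"
+    parquet_labels_cache:
+      backend: "inmemory"
+```
+
+Production deployments would typically use memcached or redis instead, as in the full example below.
+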
Configure caching in the blocks storage section: + +```yaml +blocks_storage: + bucket_store: + # Chunks cache configuration for parquet data + chunks_cache: + backend: "memcached" # Options: "", "inmemory", "memcached", "redis" + subrange_size: 16000 # Size of each subrange for better caching + max_get_range_requests: 3 # Max sub-GetRange requests per GetRange call + attributes_ttl: 168h # TTL for caching object attributes + subrange_ttl: 24h # TTL for caching individual chunk subranges + + # Memcached configuration (if using memcached backend) + memcached: + addresses: "memcached:11211" + timeout: 500ms + max_idle_connections: 16 + max_async_concurrency: 10 + max_async_buffer_size: 10000 + max_get_multi_concurrency: 100 + max_get_multi_batch_size: 0 + + # Parquet labels cache configuration (experimental) + parquet_labels_cache: + backend: "memcached" # Options: "", "inmemory", "memcached", "redis" + subrange_size: 16000 # Size of each subrange for better caching + max_get_range_requests: 3 # Max sub-GetRange requests per GetRange call + attributes_ttl: 168h # TTL for caching object attributes + subrange_ttl: 24h # TTL for caching individual label subranges + + # Memcached configuration (if using memcached backend) + memcached: + addresses: "memcached:11211" + timeout: 500ms + max_idle_connections: 16 +``` + +#### Cache Backend Options + +- **Empty string ("")**: Disables caching +- **inmemory**: Uses in-memory cache (suitable for single-instance deployments) +- **memcached**: Uses Memcached for distributed caching (recommended for production) +- **redis**: Uses Redis for distributed caching +- **Multi-level**: Comma-separated list for multi-tier caching (e.g., "inmemory,memcached") + +#### Cache Performance Tuning + +- **subrange_size**: Smaller values increase cache hit rates but create more cache entries +- **max_get_range_requests**: Higher values reduce object storage requests but increase memory usage +- **TTL values**: Balance between cache freshness and hit rates based on your data patterns +- **Multi-level caching**: Use "inmemory,memcached" for L1/L2 cache hierarchy + +## Block Conversion Logic + +The parquet converter determines which blocks to convert based on: + +1. **Time Range**: Only blocks with time ranges larger than the base TSDB block duration (typically 2h) are converted +2. **Conversion Status**: Blocks are only converted once, tracked via marker files +3. **Tenant Settings**: Conversion must be enabled for the specific tenant + +The conversion process: +- Downloads TSDB blocks from object storage +- Converts time series data to Parquet format +- Uploads Parquet files (chunks and labels) to object storage +- Creates conversion marker files to track completion + +## Querying Behavior + +When parquet queryable is enabled: + +1. **Block Discovery**: The bucket index is used to discover available blocks + * The bucket index now contains metadata indicating whether parquet files are available for querying +1. **Query Execution**: Queries prioritize parquet files when available, falling back to TSDB blocks when parquet conversion is incomplete +1. **Hybrid Queries**: Supports querying both parquet and TSDB blocks within the same query operation +1. 
**Fallback Control**: When `parquet_queryable_fallback_disabled` is set to `true`, queries will fail with a consistency check error if any required blocks are not available as parquet files, ensuring strict parquet-only querying + +## Monitoring + +### Parquet Converter Metrics + +Monitor parquet converter operations: + +```promql +# Blocks converted +cortex_parquet_converter_blocks_converted_total + +# Conversion failures +cortex_parquet_converter_block_convert_failures_total + +# Delay, in minutes, between a TSDB block being uploaded to object store and its conversion to Parquet +cortex_parquet_converter_convert_block_delay_minutes +``` + +### Parquet Queryable Metrics + +Monitor parquet query performance: + +```promql +# Blocks queried by type +cortex_parquet_queryable_blocks_queried_total + +# Query operations +cortex_parquet_queryable_operations_total + +# Cache metrics +cortex_parquet_queryable_cache_hits_total +cortex_parquet_queryable_cache_misses_total +``` + +## Best Practices + +### Deployment Recommendations + +1. **Dedicated Converters**: Run parquet converters on dedicated instances for better resource isolation +2. **Ring Configuration**: Use a distributed ring for high availability and load distribution +3. **Storage Considerations**: Ensure sufficient disk space in `data_dir` for block processing +4. **Network Bandwidth**: Consider network bandwidth for downloading/uploading blocks + +### Performance Tuning + +1. **Row Group Size**: Adjust `max_rows_per_row_group` based on your query patterns +2. **Cache Size**: Tune `parquet_queryable_shard_cache_size` based on available memory +3. **Concurrency**: Adjust `meta_sync_concurrency` based on object storage performance +4. **Sort Columns**: Configure `parquet_converter_sort_columns` based on your most common query filters to improve query performance + +### Fallback Configuration + +1. **Gradual Migration**: Keep `parquet_queryable_fallback_disabled: false` (default) during initial deployment to allow queries to succeed even when parquet conversion is incomplete +2. **Strict Parquet Mode**: Set `parquet_queryable_fallback_disabled: true` only after ensuring all required blocks have been converted to parquet format +3. **Monitoring**: Monitor conversion progress and query failures before enabling strict parquet mode + +## Limitations + +1. **Experimental Feature**: Parquet mode is experimental and may have stability issues +2. **Storage Overhead**: Parquet files are stored in addition to TSDB blocks +3. **Conversion Latency**: There's a delay between block creation and parquet availability +4. **Shuffle Sharding Requirement**: Parquet mode only supports shuffle sharding as its sharding strategy +5. **Bucket Index Dependency**: The bucket index must be enabled and properly configured as it provides essential metadata for parquet file discovery and query routing + +## Migration Considerations + +When enabling parquet mode: + +1. **Gradual Rollout**: Enable for specific tenants first +2. **Monitor Resources**: Watch CPU, memory, and storage usage +3. **Backup Strategy**: Ensure TSDB blocks remain available as fallback +4.
**Testing**: Thoroughly test query patterns before production deployment diff --git a/docs/proposals/partition-ring-multi-az-replication.md b/docs/proposals/partition-ring-multi-az-replication.md new file mode 100644 index 0000000000..34afd856ec --- /dev/null +++ b/docs/proposals/partition-ring-multi-az-replication.md @@ -0,0 +1,209 @@ +--- +title: "Partition Ring with Multi-AZ Replication" +linkTitle: "Partition Ring Multi-AZ Replication" +weight: 1 +slug: partition-ring-multi-az-replication +--- + +- Author: [Daniel Blando](https://github.com/danielblando) +- Date: July 2025 +- Status: Proposed + +## Background + +Distributors use a token-based ring to shard data across ingesters. Each ingester owns random tokens (32-bit numbers) in a hash ring. For each incoming series, the distributor: + +1. Hashes the series labels to get a hash value +2. Finds the primary ingester (smallest token > hash value) +3. When replication is enabled, selects additional replicas by moving clockwise around the ring +4. Ensures replicas are distributed across different availability zones + +The issue arises when replication is enabled: each series in a request is hashed independently, causing each series to route to different groups of ingesters. + +```mermaid +graph TD + A[Write Request] --> B[Distributor] + B --> C[Hash Series 1] --> D[Ingesters: 5,7,9] + B --> E[Hash Series 2] --> F[Ingesters: 5,3,10] + B --> G[Hash Series 3] --> H[Ingesters: 7,27,28] + B --> I[...] --> J[Different ingester sets
for each series] +``` + +## Problem + +### Limited AZ Failure Tolerance with replication factor + +While the token ring effectively distributes load across the ingester fleet, the independent hashing and routing of each series creates an amplification effect where a single ingester failure can impact a large number of write requests. + +Consider a ring with 30 ingesters, where each series is distributed to three different ingesters: + +``` +Sample 1: {name="http_request_latency",api="/push", status="2xx"} + → Ingesters: ing-5, ing-7, ing-9 +Sample 2: {name="http_request_latency",api="/push", status="4xx"} + → Ingesters: ing-5, ing-3, ing-10 +Sample 3: {name="http_request_latency",api="/push", status="2xx"} + → Ingesters: ing-7, ing-27, ing-28 +... +``` +If ingesters `ing-15` and `ing-18` (in different AZs) are offline, any request containing a series that needs to write to both these ingesters will fail completely: + +``` +Sample 15: {name="http_request_latency",api="/push", status="5xx"} + → Ingesters: ing-10, ing-15, ing-18 // Request fails +``` + +With requests increasing their batch size, the probability of request failure becomes critical in replicated deployments. Given two failed ingesters in different AZs, each individual series has a small chance of requiring both failed ingesters. However, as request batch sizes increase, the probability that at least one series in the batch will hash to both failed ingesters approaches certainty. + +**Note**: This problem specifically affects Cortex deployments that use replication. Deployments with a replication factor of 1 are not impacted by this availability amplification issue. + +## Proposed Solution + +### Partition Ring Architecture + +A new Partition Ring is proposed where the ring is divided into partitions, with each partition containing a set of tokens and a group of ingesters. Ingesters are allocated to partitions based on their order in the zonal StatefulSet, ensuring that scaling operations align with StatefulSet's LIFO behavior. Each partition contains a number of ingesters equal to the replication factor, with exactly one ingester per availability zone. + +This approach provides **reduced failure probability**, where the chance of two ingesters in the same partition being down decreases significantly compared to random ingester failures affecting multiple series. It also enables **deterministic replication** where data sent to `ing-az1-1` always replicates to `ing-az2-1` and `ing-az3-1`, making the system behavior more predictable and easier to troubleshoot. + +```mermaid +graph TD + subgraph "Partition Ring" + subgraph "Partition 3" + P1A[ing-az1-3] + P1B[ing-az2-3] + P1C[ing-az3-3] + end + subgraph "Partition 2" + P2A[ing-az1-2] + P2B[ing-az2-2] + P2C[ing-az3-2] + end + subgraph "Partition 1" + P3A[ing-az1-1] + P3B[ing-az2-1] + P3C[ing-az3-1] + end + end + + T1[Tokens 34] --> P1A + T2[Tokens 56] --> P2A + T3[Tokens 12] --> P3A +``` + +Within each partition, ingesters maintain identical data, acting as true replicas of each other. Distributors maintain similar hashing logic but select a partition instead of individual ingesters. Data is then forwarded to all ingesters within the selected partition, making the replication pattern deterministic.
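+
+To make the routing concrete, here is a minimal sketch of partition selection (the types and helper names are illustrative only, not the proposed API): the distributor hashes a series once, finds the owning token, and fans the write out to every ingester in that partition.
+
+```go
+package main
+
+import (
+	"fmt"
+	"hash/fnv"
+	"sort"
+)
+
+// Illustrative partition: one ingester per AZ, all holding identical data.
+type Partition struct {
+	ID        int
+	Ingesters []string
+}
+
+type PartitionRing struct {
+	tokens []uint32              // sorted token ring
+	owner  map[uint32]*Partition // token -> owning partition
+}
+
+// Lookup returns the partition owning the smallest token > hash, wrapping around.
+func (r *PartitionRing) Lookup(hash uint32) *Partition {
+	i := sort.Search(len(r.tokens), func(i int) bool { return r.tokens[i] > hash })
+	if i == len(r.tokens) {
+		i = 0 // wrap around the ring
+	}
+	return r.owner[r.tokens[i]]
+}
+
+func hashSeries(series string) uint32 {
+	h := fnv.New32a()
+	h.Write([]byte(series))
+	return h.Sum32()
+}
+
+func main() {
+	p1 := &Partition{ID: 1, Ingesters: []string{"ing-az1-1", "ing-az2-1", "ing-az3-1"}}
+	p2 := &Partition{ID: 2, Ingesters: []string{"ing-az1-2", "ing-az2-2", "ing-az3-2"}}
+	ring := &PartitionRing{
+		tokens: []uint32{12, 34, 56},
+		owner:  map[uint32]*Partition{12: p1, 34: p2, 56: p1},
+	}
+	s := `{__name__="http_request_latency",api="/push",status="2xx"}`
+	p := ring.Lookup(hashSeries(s) % 64) // tiny token space, for the example only
+	// All replicas come from a single partition, so the replica set is deterministic.
+	fmt.Printf("series -> partition %d -> replicas %v\n", p.ID, p.Ingesters)
+}
+```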
+ +### Protocol Buffer Definitions + +```protobuf +message PartitionRingDesc { + map<int32, PartitionDesc> partitions = 1; +} + +message PartitionDesc { + PartitionState state = 1; + repeated uint32 tokens = 2; + map<string, InstanceDesc> instances = 3; + int64 registered_timestamp = 4; +} + +// Unchanged from current implementation +message InstanceDesc { + string addr = 1; + int64 timestamp = 2; + InstanceState state = 3; + string zone = 7; + int64 registered_timestamp = 8; +} +``` + +### Partition States + +Partitions maintain a simplified state model that provides **clear ownership** where each series belongs to exactly one partition, but requires **additional state management** for partition states and lifecycle management: + +```go +type PartitionState int + +const ( + NON_READY PartitionState = iota // Insufficient ingesters + ACTIVE // Fully operational + READONLY // Scale-down in progress +) +``` + +State transitions: +```mermaid +stateDiagram-v2 + [*] --> NON_READY + NON_READY --> ACTIVE : Required ingesters joined
across all AZs + ACTIVE --> READONLY : Scale-down initiated + ACTIVE --> NON_READY : Ingester removed + READONLY --> NON_READY : Ingesters removed + NON_READY --> [*] : Partition deleted +``` + +### Partition Lifecycle Management + +#### Creating Partitions + +When a new ingester joins the ring: +1. Check if a suitable partition exists with available slots +2. If no partition exists, create a new partition in `NON_READY` state +3. Add partition's tokens to the ring +4. Add the ingester to the partition +5. Wait for required number of ingesters across all AZs (one per AZ) +6. Once all AZs are represented, transition partition to `ACTIVE` + +#### Removing Partitions + +The scale-down process follows these steps: +1. **Mark READONLY**: Partition stops accepting new writes but continues serving reads +2. **Data Transfer**: Wait for all ingesters in partition to transfer data and become empty +3. **Coordinated Removal**: Remove one ingester from each AZ simultaneously +4. **State Transition**: Partition automatically transitions to `NON_READY` (insufficient replicas) +5. **Cleanup**: Remove remaining ingesters and delete partition from ring + +If READONLY mode is not used, removing an ingester marks the partition as NON_READY. When all ingesters are removed, the last one deletes the partition if the `unregister_on_shutdown` configuration is true. + +### Multi-Ring Migration Strategy + +To address the migration challenge for production clusters currently running token-based rings, this proposal also introduces a multi-ring infrastructure that allows gradual traffic shifting from token-based to partition-based rings: + +```mermaid +sequenceDiagram + participant C as Client + participant D as Distributor + participant MR as Multi-Ring Router + participant TR as Token Ring + participant PR as Partition Ring + + C->>D: Write Request (1000 series) + D->>MR: Route request + MR->>MR: Check percentage config
(e.g., 80% token, 20% partition) + MR->>TR: Route 800 series to Token Ring + MR->>PR: Route 200 series to Partition Ring + + Note over TR,PR: Both rings process their portion + TR->>D: Response for 800 series + PR->>D: Response for 200 series + D->>C: Combined response +``` + +Migration phases for production clusters: +1. **Phase 1**: Deploy partition ring alongside existing token ring (0% traffic) +2. **Phase 2**: Route 10% traffic to partition ring +3. **Phase 3**: Gradually increase to 50% traffic +4. **Phase 4**: Route 90% traffic to partition ring +5. **Phase 5**: Complete migration (100% partition ring) + +This multi-ring approach solves the migration problem for existing production deployments that cannot afford downtime during the transition from token-based to partition-based rings. It provides **zero downtime migration** with **rollback capability** and **incremental validation** at each step. However, it requires **dual ring participation** where ingesters must participate in both rings during migration, **increased memory usage** and **migration coordination** requiring careful percentage management and monitoring. + +#### Read Path Considerations + +During migration, the read path (queriers and rulers) must have visibility into both rings to ensure all functionality works correctly: + +- **Queriers** must check both token and partition rings to locate series data, as data may be distributed across both ring types during migration +- **Rulers** must evaluate rules against data from both rings to ensure complete rule evaluation +- **Ring-aware components** (like shuffle sharding) must operate correctly across both ring types +- **Metadata operations** (like label queries) must aggregate results from both rings + +All existing Cortex functionality must continue to work seamlessly during the migration period, requiring components to transparently handle the dual-ring architecture. diff --git a/docs/roadmap.md b/docs/roadmap.md index 45815e35f7..d4cfd051f2 100644 --- a/docs/roadmap.md +++ b/docs/roadmap.md @@ -37,3 +37,15 @@ For more information tracking this, please see [issue #6075](https://github.com/ This makes queries over long periods more efficient. It can reduce storage space slightly if the full-detail data is discarded. For more information tracking this, please see [issue #4322](https://github.com/cortexproject/cortex/issues/4322). + +## Changes to this Roadmap + +Changes to this roadmap will take the form of pull requests containing the suggested change. All such PRs must be posted to the [#cortex](https://cloud-native.slack.com/archives/CCYDASBLP) Slack channel in +the [CNCF slack](https://communityinviter.com/apps/cloud-native/cncf) so that they're made visible to all other developers and maintainers. + +Significant changes to this document should be discussed in the [monthly meeting](https://github.com/cortexproject/cortex?tab=readme-ov-file#engage-with-our-community) +before merging, to raise awareness of the change and to provide an opportunity for discussion. A significant change is one which meaningfully alters +one of the roadmap items, adds a new item, or removes an item. + +Insignificant changes include updating links to issues, spelling fixes or minor rewordings which don't significantly change meanings. These insignificant changes +don't need to be discussed in a meeting but should still be shared in Slack. 
diff --git a/go.mod b/go.mod index ea2dbcc067..92129a822c 100644 --- a/go.mod +++ b/go.mod @@ -6,8 +6,7 @@ require ( github.com/Masterminds/squirrel v1.5.4 github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b github.com/alicebob/miniredis/v2 v2.35.0 - github.com/armon/go-metrics v0.4.1 - github.com/aws/aws-sdk-go v1.55.7 + github.com/aws/aws-sdk-go v1.55.7 // indirect github.com/bradfitz/gomemcache v0.0.0-20250403215159-8d39553ac7cf github.com/cortexproject/promqlsmith v0.0.0-20250407233056-90db95b1a4e4 github.com/dustin/go-humanize v1.0.1 @@ -26,14 +25,15 @@ require ( github.com/gorilla/mux v1.8.1 github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 - github.com/hashicorp/consul/api v1.31.2 + github.com/hashicorp/consul/api v1.32.0 github.com/hashicorp/go-cleanhttp v0.5.2 + github.com/hashicorp/go-metrics v0.5.4 github.com/hashicorp/go-sockaddr v1.0.7 - github.com/hashicorp/memberlist v0.5.1 + github.com/hashicorp/memberlist v0.5.3 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.18.0 github.com/lib/pq v1.10.9 - github.com/minio/minio-go/v7 v7.0.80 + github.com/minio/minio-go/v7 v7.0.93 github.com/mitchellh/go-wordwrap v1.0.1 github.com/oklog/ulid v1.3.1 // indirect github.com/opentracing-contrib/go-grpc v0.1.2 @@ -41,18 +41,18 @@ require ( github.com/opentracing/opentracing-go v1.2.0 github.com/pkg/errors v0.9.1 github.com/prometheus/alertmanager v0.28.1 - github.com/prometheus/client_golang v1.22.0 + github.com/prometheus/client_golang v1.23.0-rc.1 github.com/prometheus/client_model v0.6.2 - github.com/prometheus/common v0.63.0 + github.com/prometheus/common v0.65.1-0.20250703115700-7f8b2a0d32d3 // Prometheus maps version 2.x.y to tags v0.x.y. - github.com/prometheus/prometheus v0.303.1 + github.com/prometheus/prometheus v0.305.1-0.20250808023455-1e4144a496fb github.com/segmentio/fasthash v1.0.3 github.com/sony/gobreaker v1.0.0 github.com/spf13/afero v1.11.0 github.com/stretchr/testify v1.10.0 - github.com/thanos-io/objstore v0.0.0-20250317105316-a0136a6f898d - github.com/thanos-io/promql-engine v0.0.0-20250611170940-015ebeb7b5ff - github.com/thanos-io/thanos v0.39.2 + github.com/thanos-io/objstore v0.0.0-20250722142242-922b22272ee3 + github.com/thanos-io/promql-engine v0.0.0-20250924193140-e9123dc11264 + github.com/thanos-io/thanos v0.39.3-0.20250729120336-88d0ae8071cb github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/weaveworks/common v0.0.0-20230728070032-dd9e68f319d5 go.etcd.io/etcd/api/v3 v3.5.17 @@ -76,33 +76,38 @@ require ( require ( github.com/VictoriaMetrics/fastcache v1.12.2 + github.com/aws/aws-sdk-go-v2 v1.38.3 + github.com/aws/aws-sdk-go-v2/config v1.29.15 + github.com/aws/aws-sdk-go-v2/service/dynamodb v1.50.1 github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 github.com/cespare/xxhash/v2 v2.3.0 github.com/google/go-cmp v0.7.0 + github.com/google/uuid v1.6.0 github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/oklog/ulid/v2 v2.1.1 github.com/parquet-go/parquet-go v0.25.1 - github.com/prometheus-community/parquet-common v0.0.0-20250716185251-4cfa597e936c + github.com/prometheus-community/parquet-common v0.0.0-20250827225610-65f0b68d35e6 + github.com/prometheus/client_golang/exp v0.0.0-20250914183048-a974e0d45e0a github.com/prometheus/procfs v0.16.1 github.com/sercand/kuberesolver/v5 v5.1.1 github.com/tjhop/slog-gokit v0.1.4 - go.opentelemetry.io/collector/pdata v1.34.0 + 
go.opentelemetry.io/collector/pdata v1.35.0 go.uber.org/automaxprocs v1.6.0 google.golang.org/protobuf v1.36.6 ) require ( cel.dev/expr v0.23.1 // indirect - cloud.google.com/go v0.118.1 // indirect - cloud.google.com/go/auth v0.15.1-0.20250317171031-671eed979bfd // indirect + cloud.google.com/go v0.120.0 // indirect + cloud.google.com/go/auth v0.16.2 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect cloud.google.com/go/compute/metadata v0.7.0 // indirect - cloud.google.com/go/iam v1.3.1 // indirect - cloud.google.com/go/monitoring v1.24.0 // indirect + cloud.google.com/go/iam v1.5.2 // indirect + cloud.google.com/go/monitoring v1.24.2 // indirect cloud.google.com/go/storage v1.50.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect @@ -111,20 +116,20 @@ require ( github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.52.0 // indirect github.com/alecthomas/kingpin/v2 v2.4.0 // indirect github.com/andybalholm/brotli v1.1.1 // indirect + github.com/armon/go-metrics v0.4.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/aws/aws-sdk-go-v2 v1.36.3 // indirect - github.com/aws/aws-sdk-go-v2/config v1.29.15 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.68 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.6 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.33.20 // indirect - github.com/aws/smithy-go v1.22.3 // indirect + github.com/aws/smithy-go v1.23.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/caio/go-tdigest v3.1.0+incompatible // indirect @@ -147,8 +152,8 @@ require ( github.com/fatih/color v1.18.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/go-chi/chi/v5 v5.2.2 // indirect github.com/go-ini/ini v1.67.0 // indirect - github.com/go-jose/go-jose/v4 v4.0.5 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -160,7 +165,7 @@ require ( github.com/go-openapi/runtime v0.28.0 // indirect 
github.com/go-openapi/spec v0.21.0 // indirect github.com/go-openapi/validate v0.24.0 // indirect - github.com/go-viper/mapstructure/v2 v2.3.0 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/goccy/go-json v0.10.5 // indirect github.com/gofrs/uuid v4.4.0+incompatible // indirect @@ -170,9 +175,8 @@ require ( github.com/google/btree v1.1.3 // indirect github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a // indirect github.com/google/s2a-go v0.1.9 // indirect - github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect - github.com/googleapis/gax-go/v2 v2.14.1 // indirect + github.com/googleapis/gax-go/v2 v2.14.2 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -204,6 +208,7 @@ require ( github.com/mdlayher/vsock v1.2.1 // indirect github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a // indirect github.com/miekg/dns v1.1.66 // indirect + github.com/minio/crc64nvme v1.0.1 // indirect github.com/minio/md5-simd v1.1.2 // indirect github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect @@ -214,18 +219,21 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/ncw/swift v1.0.53 // indirect - github.com/oklog/run v1.1.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.128.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.128.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.128.0 // indirect + github.com/oklog/run v1.2.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.129.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.129.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.129.0 // indirect + github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect github.com/pierrec/lz4/v4 v4.1.22 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus-community/prom-label-proxy v0.11.1 // indirect github.com/prometheus/exporter-toolkit v0.14.0 // indirect - github.com/prometheus/sigv4 v0.1.2 // indirect + github.com/prometheus/otlptranslator v0.0.0-20250731173911-a9673827589a // indirect + github.com/prometheus/sigv4 v0.2.0 // indirect github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect + github.com/rantav/go-grpc-channelz v0.0.4 // indirect github.com/redis/rueidis v1.0.61 // indirect github.com/rs/cors v1.11.1 // indirect github.com/rs/xid v1.6.0 // indirect @@ -235,26 +243,25 @@ require ( github.com/shurcooL/vfsgen v0.0.0-20230704071429-0000e147ea92 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/soheilhy/cmux v0.1.5 // indirect - github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect github.com/stretchr/objx v0.5.2 // indirect + github.com/tinylib/msgp v1.3.0 // indirect github.com/trivago/tgo v1.0.7 // indirect github.com/uber/jaeger-lib 
v2.4.1+incompatible // indirect github.com/vimeo/galaxycache v1.3.1 // indirect github.com/weaveworks/promrus v1.2.0 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect github.com/yuin/gopher-lua v1.1.1 // indirect - github.com/zeebo/errs v1.4.0 // indirect go.mongodb.org/mongo-driver v1.17.4 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/collector/component v1.34.0 // indirect - go.opentelemetry.io/collector/confmap v1.34.0 // indirect - go.opentelemetry.io/collector/confmap/xconfmap v0.128.0 // indirect - go.opentelemetry.io/collector/consumer v1.34.0 // indirect - go.opentelemetry.io/collector/featuregate v1.34.0 // indirect - go.opentelemetry.io/collector/internal/telemetry v0.128.0 // indirect - go.opentelemetry.io/collector/pipeline v0.128.0 // indirect - go.opentelemetry.io/collector/processor v1.34.0 // indirect + go.opentelemetry.io/collector/component v1.35.0 // indirect + go.opentelemetry.io/collector/confmap v1.35.0 // indirect + go.opentelemetry.io/collector/confmap/xconfmap v0.129.0 // indirect + go.opentelemetry.io/collector/consumer v1.35.0 // indirect + go.opentelemetry.io/collector/featuregate v1.35.0 // indirect + go.opentelemetry.io/collector/internal/telemetry v0.129.0 // indirect + go.opentelemetry.io/collector/pipeline v0.129.0 // indirect + go.opentelemetry.io/collector/processor v1.35.0 // indirect go.opentelemetry.io/collector/semconv v0.128.0 // indirect go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 // indirect go.opentelemetry.io/contrib/detectors/gcp v1.35.0 // indirect @@ -282,8 +289,8 @@ require ( golang.org/x/text v0.26.0 // indirect golang.org/x/tools v0.34.0 // indirect gonum.org/v1/gonum v0.16.0 // indirect - google.golang.org/api v0.228.0 // indirect - google.golang.org/genproto v0.0.0-20250204164813-702378808489 // indirect + google.golang.org/api v0.239.0 // indirect + google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect gopkg.in/telebot.v3 v3.3.8 // indirect @@ -295,7 +302,7 @@ require ( ) // Using cortex fork of weaveworks/common -replace github.com/weaveworks/common => github.com/cortexproject/weaveworks-common v0.0.0-20241129212437-96019edf21f1 +replace github.com/weaveworks/common => github.com/cortexproject/weaveworks-common v0.0.0-20250902164925-0315015a8b9f // Override since git.apache.org is down. The docs say to fetch from github. replace git.apache.org/thrift.git => github.com/apache/thrift v0.0.0-20180902110319-2566ecd5d999 @@ -321,7 +328,8 @@ replace github.com/google/gnostic => github.com/googleapis/gnostic v0.6.9 // https://github.com/thanos-io/thanos/blob/fdeea3917591fc363a329cbe23af37c6fff0b5f0/go.mod#L265 replace gopkg.in/alecthomas/kingpin.v2 => github.com/alecthomas/kingpin v1.3.8-0.20210301060133-17f40c25f497 -replace github.com/thanos-io/objstore => github.com/thanos-io/objstore v0.0.0-20241111205755-d1dd89d41f97 +// Wait for fix for https://github.com/grpc/grpc-go/pull/8504. +replace google.golang.org/grpc => google.golang.org/grpc v1.71.2 -// v3.3.1 with https://github.com/prometheus/prometheus/pull/16252. 
(same as thanos) -replace github.com/prometheus/prometheus => github.com/thanos-io/thanos-prometheus v0.0.0-20250610133519-082594458a88 +// See https://github.com/envoyproxy/go-control-plane/issues/1083 as this version introduces checksum mismatch. +exclude github.com/envoyproxy/go-control-plane/envoy v1.32.3 diff --git a/go.sum b/go.sum index 6985e6f181..24bc17a2f1 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,9 @@ +cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= +cel.dev/expr v0.16.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= +cel.dev/expr v0.19.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= cel.dev/expr v0.23.1 h1:K4KOtPCJQjVggkARsjG9RWXP6O4R73aHeJMa/dmCQQg= cel.dev/expr v0.23.1/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= @@ -30,56 +33,757 @@ cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+Y cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.118.1 h1:b8RATMcrK9A4BH0rj8yQupPXp+aP+cJ0l6H7V9osV1E= -cloud.google.com/go v0.118.1/go.mod h1:CFO4UPEPi8oV21xoezZCrd3d81K4fFkDTEJu4R8K+9M= -cloud.google.com/go/auth v0.15.1-0.20250317171031-671eed979bfd h1:0y6Ls7Yg2PYIjBiiY4COpxqhv+hRtoDQfY/u/eXNZuw= -cloud.google.com/go/auth v0.15.1-0.20250317171031-671eed979bfd/go.mod h1:uJW0Bahg/VuSfsCxYjfpcKMblBoti/JuY8OQfnmW4Vk= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw= +cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go v0.120.0 h1:wc6bgG9DHyKqF5/vQvX1CiZrtHnxJjBlKUyF9nP6meA= +cloud.google.com/go v0.120.0/go.mod h1:/beW32s8/pGRuj4IILWQNd4uuebeT4dkOhKmkfit64Q= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= 
+cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= +cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +cloud.google.com/go/accesscontextmanager v1.8.0/go.mod h1:uI+AI/r1oyWK99NN8cQ3UK76AMelMzgZCvJfsi2c+ps= +cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= +cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k= +cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +cloud.google.com/go/aiplatform v1.45.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= +cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M= +cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= +cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +cloud.google.com/go/analytics v0.21.2/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= +cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= +cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= +cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= +cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= +cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= +cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= +cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= 
+cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= +cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A= +cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY= +cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI= +cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= +cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI= +cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= +cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg= +cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= +cloud.google.com/go/auth v0.16.2 h1:QvBAGFPLrDeoiNjyfVunhQ10HKNYuOwZ5noee0M5df4= +cloud.google.com/go/auth v0.16.2/go.mod h1:sRBas2Y1fB1vZTdurouM0AzuYQBMZinrUYL8EufhtEA= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod 
h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= +cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= +cloud.google.com/go/beyondcorp v0.6.1/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= +cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E= +cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= +cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= +cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +cloud.google.com/go/bigquery v1.52.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= +cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod 
h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= +cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= +cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M= +cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= +cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +cloud.google.com/go/cloudbuild v1.10.1/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= +cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= +cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +cloud.google.com/go/cloudtasks v1.11.1/go.mod 
h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= +cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/compute v1.19.3/go.mod h1:qxvISKp/gYnXkSAD1ppcSOveRAmzxicEv/JlizULFrI= +cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU= cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +cloud.google.com/go/contactcenterinsights v1.9.1/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= +cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= +cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM= +cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +cloud.google.com/go/container v1.22.1/go.mod 
h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= +cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= +cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= +cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= +cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +cloud.google.com/go/datacatalog v1.14.0/go.mod h1:h0PrGtlihoutNMp/uvwhawLQ9+c63Kz65UFqh49Yo+E= +cloud.google.com/go/datacatalog v1.14.1/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= +cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= +cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod 
h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= +cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +cloud.google.com/go/dataplex v1.8.1/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= +cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= +cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +cloud.google.com/go/datastore v1.12.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= +cloud.google.com/go/datastore v1.12.1/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= +cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= +cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +cloud.google.com/go/datastream v1.9.1/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= +cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= +cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +cloud.google.com/go/deploy v1.11.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= +cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod 
h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= +cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= +cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +cloud.google.com/go/dialogflow v1.38.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= +cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= +cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +cloud.google.com/go/documentai v1.20.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= +cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= +cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= +cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +cloud.google.com/go/eventarc v1.12.1/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= +cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= 
+cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= +cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= -cloud.google.com/go/iam v1.3.1 h1:KFf8SaT71yYq+sQtRISn90Gyhyf4X8RGgeAVC8XGf3E= -cloud.google.com/go/iam v1.3.1/go.mod h1:3wMtuyT4NcbnYNPLMBzYRFiEfjKfJlLVLrisE7bwm34= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/firestore v1.11.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= +cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA= +cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +cloud.google.com/go/gaming v1.10.1/go.mod h1:XQQvtfP8Rb9Rxnxm5wFVpAp9zCQkJi2bLIb7iHGwB3s= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= +cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod 
h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +cloud.google.com/go/gkemulticloud v0.6.1/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= +cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/grafeas v0.3.0/go.mod h1:P7hgN24EyONOTMyeJH6DxG4zD7fwiYa5Q6GUgyFSOU8= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= +cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +cloud.google.com/go/iam v1.0.1/go.mod h1:yR3tmSL8BcZB4bxByRv2jkSIahVmCtfKZwLYGBalRE8= +cloud.google.com/go/iam v1.1.0/go.mod h1:nxdHjaKfCr7fNYx/HJMM8LgiMugmveWlkatear5gVyk= +cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8= +cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= +cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo= +cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= +cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/kms v1.5.0/go.mod 
h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg= +cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= +cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= +cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +cloud.google.com/go/kms v1.11.0/go.mod h1:hwdiYC0xjnWsKQQCQQmIQnS9asjYVSK6jtXm+zFqXLM= +cloud.google.com/go/kms v1.12.1/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= +cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc= cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA= -cloud.google.com/go/longrunning v0.6.4 h1:3tyw9rO3E2XVXzSApn1gyEEnH2K9SynNQjMlBi3uHLg= -cloud.google.com/go/longrunning v0.6.4/go.mod h1:ttZpLCe6e7EXvn9OxpBRx7kZEB0efv8yBO6YnVMfhJs= -cloud.google.com/go/monitoring v1.24.0 h1:csSKiCJ+WVRgNkRzzz3BPoGjFhjPY23ZTcaenToJxMM= -cloud.google.com/go/monitoring v1.24.0/go.mod h1:Bd1PRK5bmQBQNnuGwHBfUamAV1ys9049oEPHnn4pcsc= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/longrunning v0.4.2/go.mod h1:OHrnaYyLUV6oqwh0xiS7e5sLQhP1m0QU9R+WhGDMgIQ= +cloud.google.com/go/longrunning v0.5.0/go.mod h1:0JNuqRShmscVAhIACGtskSAWtqtOoPkwP0YF1oVEchc= +cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= +cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= +cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +cloud.google.com/go/managedidentities v1.6.1/go.mod 
h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak=
+cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI=
+cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw=
+cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY=
+cloud.google.com/go/maps v1.3.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s=
+cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s=
+cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4=
+cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w=
+cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I=
+cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig=
+cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE=
+cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM=
+cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA=
+cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY=
+cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM=
+cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA=
+cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY=
+cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s=
+cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8=
+cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI=
+cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo=
+cloud.google.com/go/metastore v1.11.1/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA=
+cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA=
+cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk=
+cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4=
+cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w=
+cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw=
+cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM=
+cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM=
+cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U=
+cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA=
+cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o=
+cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM=
+cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8=
+cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E=
+cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM=
+cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E=
+cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8=
+cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4=
+cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY=
+cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0=
+cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ=
+cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU=
+cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k=
+cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU=
+cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ=
+cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY=
+cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34=
+cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA=
+cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0=
+cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE=
+cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ=
+cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8=
+cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4=
+cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs=
+cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI=
+cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk=
+cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA=
+cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk=
+cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ=
+cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8=
+cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE=
+cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc=
+cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc=
+cloud.google.com/go/orgpolicy v1.11.0/go.mod h1:2RK748+FtVvnfuynxBzdnyu7sygtoZa1za/0ZfpOs1M=
+cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE=
+cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs=
+cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg=
+cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo=
+cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw=
+cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw=
+cloud.google.com/go/osconfig v1.12.0/go.mod h1:8f/PaYzoS3JMVfdfTubkowZYGmAhUCjjwnjqWI7NVBc=
+cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE=
+cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E=
+cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU=
+cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70=
+cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo=
+cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs=
+cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs=
+cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0=
+cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA=
+cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk=
+cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I=
+cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg=
+cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE=
+cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw=
+cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc=
+cloud.google.com/go/policytroubleshooter v1.7.1/go.mod h1:0NaT5v3Ag1M7U5r0GfDCpUFkWd9YqpubBWsQlhanRv0=
+cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU=
+cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0=
+cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI=
+cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg=
+cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs=
+cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
 cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
 cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
 cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI=
+cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0=
+cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8=
+cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4=
+cloud.google.com/go/pubsub v1.32.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc=
+cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc=
+cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg=
+cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k=
+cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM=
+cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0=
+cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4=
+cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o=
+cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk=
+cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo=
+cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE=
+cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U=
+cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA=
+cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c=
+cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU=
+cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg=
+cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4=
+cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac=
+cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE=
+cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg=
+cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c=
+cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs=
+cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70=
+cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ=
+cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA=
+cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y=
+cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A=
+cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA=
+cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM=
+cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ=
+cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg=
+cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA=
+cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0=
+cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots=
+cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo=
+cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI=
+cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8=
+cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU=
+cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg=
+cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA=
+cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw=
+cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4=
+cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY=
+cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc=
+cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y=
+cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14=
+cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE=
+cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do=
+cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo=
+cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM=
+cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg=
+cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo=
+cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s=
+cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI=
+cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk=
+cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44=
+cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc=
+cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc=
+cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo=
+cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA=
+cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4=
+cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4=
+cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU=
+cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw=
+cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4=
+cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0=
+cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU=
+cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q=
+cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA=
+cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8=
+cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0=
+cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA=
+cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU=
+cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc=
+cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk=
+cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk=
+cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0=
+cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag=
+cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ=
+cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU=
+cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s=
+cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA=
+cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc=
+cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk=
+cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs=
+cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg=
+cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4=
+cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U=
+cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY=
+cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s=
+cloud.google.com/go/servicedirectory v1.10.1/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ=
+cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ=
+cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco=
+cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo=
+cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc=
+cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4=
+cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E=
+cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU=
+cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec=
+cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA=
+cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4=
+cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw=
+cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A=
+cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g=
+cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos=
+cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk=
+cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M=
+cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI=
+cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM=
+cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ=
+cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0=
+cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco=
+cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0=
+cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI=
+cloud.google.com/go/speech v1.17.1/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo=
+cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo=
 cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
 cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
 cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
 cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
 cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
+cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
+cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
+cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
+cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y=
+cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4=
+cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E=
 cloud.google.com/go/storage v1.50.0 h1:3TbVkzTooBvnZsk7WaAQfOsNrdoM8QHusXA1cpk6QJs=
 cloud.google.com/go/storage v1.50.0/go.mod h1:l7XeiD//vx5lfqE3RavfmU9yvk5Pp0Zhcv482poyafY=
-cloud.google.com/go/trace v1.11.4 h1:LKlhVyX6I4+heP31sWvERSKZZ9cPPEZumt7b4SKVK18=
-cloud.google.com/go/trace v1.11.4/go.mod h1:lCSHzSPZC1TPwto7zhaRt3KtGYsXFyaErPQ18AUUeUE=
+cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w=
+cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I=
+cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4=
+cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw=
+cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA=
+cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw=
+cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g=
+cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM=
+cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA=
+cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c=
+cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24=
+cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8=
+cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4=
+cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc=
+cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk=
+cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ=
+cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg=
+cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM=
+cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E=
+cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28=
+cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y=
+cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA=
+cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk=
+cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk=
+cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4=
+cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI=
+cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs=
+cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg=
+cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0=
+cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos=
+cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos=
+cloud.google.com/go/translate v1.8.1/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs=
+cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs=
+cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk=
+cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw=
+cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg=
+cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk=
+cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ=
+cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ=
+cloud.google.com/go/video v1.17.1/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU=
+cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU=
+cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU=
+cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4=
+cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M=
+cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU=
+cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU=
+cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo=
+cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0=
+cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo=
+cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo=
+cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY=
+cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E=
+cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY=
+cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0=
+cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU=
+cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE=
+cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g=
+cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc=
+cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY=
+cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro=
+cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208=
+cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8=
+cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY=
+cloud.google.com/go/vmwareengine v0.4.1/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0=
+cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0=
+cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w=
+cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8=
+cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes=
+cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs=
+cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE=
+cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg=
+cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc=
+cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A=
+cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg=
+cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc=
+cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo=
+cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ=
+cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng=
+cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg=
+cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0=
+cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M=
+cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M=
+cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA=
+cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw=
+cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 h1:Gt0j3wceWMwPmiazCa8MzMA0MfhmPIz0Qp0FJ6qcM0U=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.0 h1:j8BorDEigD8UFOSZQiSqAMOOleyQOOQPnUAwV+Ls1gA=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.0/go.mod h1:JdM5psgjfBf5fo2uWOZhflPWyDBZ/O/CNAH9CtsuZE4=
+gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
+git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1 h1:Wc1ml6QlJs2BHQ/9Bqu1jiyggbsSjramq2oUmp5WeIo=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 h1:B+blDbyVIG3WaikNxPnhPiJ1MThR03b3vKGtER95TP4=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1/go.mod h1:JdM5psgjfBf5fo2uWOZhflPWyDBZ/O/CNAH9CtsuZE4=
 github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
 github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
 github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 h1:FPKJS1T+clwv+OLGt13a8UjqeRuh0O4SJ3lUriThc+4=
@@ -104,6 +808,7 @@ github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U
 github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
 github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
 github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0/go.mod h1:obipzmGjfSjam60XLwGfqUkJsfiheAl+TUjG+4yzyPM=
 github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 h1:ErKg/3iS1AKcTkf3yixlZ54f9U1rljCkQyEXWUnIUxc=
 github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0/go.mod h1:yAZHSGnqScoU556rBOVkwLze6WP5N+U11RHuWaGVxwY=
 github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 h1:5IT7xOdq17MtcdtL/vtl6mGfzhaq4m4vpollPRmlsBQ=
@@ -114,13 +819,17 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapp
 github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.52.0/go.mod h1:gdIm9TxRk5soClCwuB0FtdXsbqtw0aqPwBEurK9tPkw=
 github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM=
 github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
+github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk=
 github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM=
 github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10=
 github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
 github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
 github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
+github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY=
+github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk=
+github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
+github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM=
 github.com/alecthomas/kingpin v1.3.8-0.20210301060133-17f40c25f497/go.mod h1:b6br6/pDFSfMkBgC96TbpOji05q5pa+v5rIlS0Y6XtI=
 github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY=
 github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
@@ -137,9 +846,14 @@ github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible h1:8psS8a+wKfiLt1iVDX79F
 github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
 github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
 github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
+github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
 github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
 github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0=
+github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI=
+github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/PEd/zm8mDS9Vg=
+github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
 github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
@@ -151,32 +865,40 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
 github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE=
 github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
-github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM=
-github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg=
+github.com/aws/aws-sdk-go-v2 v1.38.3 h1:B6cV4oxnMs45fql4yRH+/Po/YU+597zgWqvDpYMturk=
+github.com/aws/aws-sdk-go-v2 v1.38.3/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY=
 github.com/aws/aws-sdk-go-v2/config v1.29.15 h1:I5XjesVMpDZXZEZonVfjI12VNMrYa38LtLnw4NtY5Ss=
 github.com/aws/aws-sdk-go-v2/config v1.29.15/go.mod h1:tNIp4JIPonlsgaO5hxO372a6gjhN63aSWl2GVl5QoBQ=
 github.com/aws/aws-sdk-go-v2/credentials v1.17.68 h1:cFb9yjI02/sWHBSYXAtkamjzCuRymvmeFmt0TC0MbYY=
 github.com/aws/aws-sdk-go-v2/credentials v1.17.68/go.mod h1:H6E+jBzyqUu8u0vGaU6POkK3P0NylYEeRZ6ynBpMqIk=
 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw=
 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6 h1:uF68eJA6+S9iVr9WgX1NaRGyQ/6MdIyc4JNUo6TN1FA=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6/go.mod h1:qlPeVZCGPiobx8wb1ft0GHT5l+dc6ldnwInDFaMvC7Y=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6 h1:pa1DEC6JoI0zduhZePp3zmhWvk/xxm4NB8Hy/Tlsgos=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6/go.mod h1:gxEjPebnhWGJoaDdtDkA0JX46VRg1wcTHYe63OfX5pE=
 github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
 github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY=
+github.com/aws/aws-sdk-go-v2/service/dynamodb v1.50.1 h1:MXUnj1TKjwQvotPPHFMfynlUljcpl5UccMrkiauKdWI=
+github.com/aws/aws-sdk-go-v2/service/dynamodb v1.50.1/go.mod h1:fe3UQAYwylCQRlGnihsqU/tTQkrc2nrW/IhWYwlW9vg=
+github.com/aws/aws-sdk-go-v2/service/ec2 v1.237.0 h1:XHE2G+yaDQql32FZt19QmQt4WuisqQJIkMUSCxeCUl8=
+github.com/aws/aws-sdk-go-v2/service/ec2 v1.237.0/go.mod h1:t11/j/nH9i6bbsPH9xc04BJOsV2nVPUqrB67/TLDsyM=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8=
+github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.6 h1:34ojKW9OV123FZ6Q8Nua3Uwy6yVTcshZ+gLE4gpMDEs=
+github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.6/go.mod h1:sXXWh1G9LKKkNbuR0f0ZPd/IvDXlMGiag40opt4XEgY=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0 h1:eRhU3Sh8dGbaniI6B+I48XJMrTPRkK4DKo+vqIxziOU=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0/go.mod h1:paNLV18DZ6FnWE/bd06RIKPDIFpjuvCkGKWTG/GDBeM=
+github.com/aws/aws-sdk-go-v2/service/lightsail v1.44.0 h1:QiiCqpKy0prxq+92uWfESzcb7/8Y9JAamcMOzVYLEoM=
+github.com/aws/aws-sdk-go-v2/service/lightsail v1.44.0/go.mod h1:ESppxYqXQCpCY+KWl3BdkQjmsQX6zxKP39SnDtRDoU0=
 github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 h1:1Gw+9ajCV1jogloEv1RRnvfRFia2cL6c9cuKV2Ps+G8=
 github.com/aws/aws-sdk-go-v2/service/sso v1.25.3/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI=
 github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 h1:hXmVKytPfTy5axZ+fYbR5d0cFmC3JvwLm5kM83luako=
 github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs=
 github.com/aws/aws-sdk-go-v2/service/sts v1.33.20 h1:oIaQ1e17CSKaWmUTu62MtraRWVIosn/iONMuZt0gbqc=
 github.com/aws/aws-sdk-go-v2/service/sts v1.33.20/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4=
-github.com/aws/smithy-go v1.22.3 h1:Z//5NuZCSW6R4PhQ93hShNbyBbn8BWCmCVCt+Q8Io5k=
-github.com/aws/smithy-go v1.22.3/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
+github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE=
+github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
 github.com/baidubce/bce-sdk-go v0.9.230 h1:HzELBKiD7QAgYqZ1qHZexoI2A3Lo/6zYGQFvcUbS5cA=
 github.com/baidubce/bce-sdk-go v0.9.230/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg=
 github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
@@ -191,6 +913,8 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM
 github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
 github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw=
 github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0=
+github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
 github.com/bradfitz/gomemcache v0.0.0-20250403215159-8d39553ac7cf h1:TqhNAT4zKbTdLa62d2HDBFdvgSbIGB3eJE8HqhgiL9I=
 github.com/bradfitz/gomemcache v0.0.0-20250403215159-8d39553ac7cf/go.mod h1:r5xuitiExdLAJ09PR7vBVENGvp4ZuTBeWTGtxuX3K+c=
 github.com/caio/go-tdigest v3.1.0+incompatible h1:uoVMJ3Q5lXmVLCCqaMGHLBWnbGoN6Lpu7OAUPR60cds=
@@ -200,7 +924,8 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY
 github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
 github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
@@ -218,20 +943,25 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D
 github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
 github.com/clbanning/mxj v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I=
 github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
 github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/cncf/xds/go v0.0.0-20241223141626-cff3c89139a3/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
 github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls=
 github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
 github.com/coder/quartz v0.1.2 h1:PVhc9sJimTdKd3VbygXtS4826EOCpB1fXoRlLnCrE+s=
 github.com/coder/quartz v0.1.2/go.mod h1:vsiCc+AHViMKH2CQpGIpFgdHIEQsxwm8yCscqKmzbRA=
+github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
+github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
+github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
+github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
 github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
 github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
@@ -239,8 +969,8 @@ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8
 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/cortexproject/promqlsmith v0.0.0-20250407233056-90db95b1a4e4 h1:dpo7kQ24uFSV6Zgm9/kB34TIUWjGmadlbKrM6fNfQko=
 github.com/cortexproject/promqlsmith v0.0.0-20250407233056-90db95b1a4e4/go.mod h1:jh6POgN18lXU133HBMfwr/1TjvBp8e5kL4ZtRsAPvGY=
-github.com/cortexproject/weaveworks-common v0.0.0-20241129212437-96019edf21f1 h1:UoSixdl0sBUhfEOMpIGxFnJjp3/y/+nkw6Du7su05FE=
-github.com/cortexproject/weaveworks-common v0.0.0-20241129212437-96019edf21f1/go.mod h1:7cl8fS/nivXe2DmBUUmr/3UGTJG2jVU2NRaIayR2Zjs=
+github.com/cortexproject/weaveworks-common v0.0.0-20250902164925-0315015a8b9f h1:hDM26bY51+giykKRIH8TUYzEy7fn62iKfTW7vfgDuNw=
+github.com/cortexproject/weaveworks-common v0.0.0-20250902164925-0315015a8b9f/go.mod h1:bls8PY13xoOKkZuRhhDdR2rNk4pfdGWCR6k2jF9s9+4=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/cristalhq/hedgedhttp v0.9.1 h1:g68L9cf8uUyQKQJwciD0A1Vgbsz+QgCjuB1I8FAsCDs=
 github.com/cristalhq/hedgedhttp v0.9.1/go.mod h1:XkqWU6qVMutbhW68NnzjWrGtH8NUx1UfYqGYtHVKIsI=
@@ -257,16 +987,17 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r
 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
 github.com/dhui/dktest v0.4.3 h1:wquqUxAFdcUgabAVLvSCOKOlag5cIZuaOjYIBOWdsR0=
 github.com/dhui/dktest v0.4.3/go.mod h1:zNK8IwktWzQRm6I/l2Wjp7MakiyaFWv4G1hjmodmMTs=
-github.com/digitalocean/godo v1.136.0 h1:DTxugljFJSMBPfEGq4KeXpnKeAHicggNqogcrw/YdZw=
-github.com/digitalocean/godo v1.136.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc=
+github.com/digitalocean/godo v1.157.0 h1:ReELaS6FxXNf8gryUiVH0wmyUmZN8/NCmBX4gXd3F0o=
+github.com/digitalocean/godo v1.157.0/go.mod h1:tYeiWY5ZXVpU48YaFv0M5irUFHXGorZpDNm7zzdWMzM=
 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/docker v28.0.1+incompatible h1:FCHjSRdXhNRFjlHMTv4jUNlIBbTeRjrWfeFuJp7jpo0=
-github.com/docker/docker v28.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v28.3.0+incompatible h1:ffS62aKWupCWdvcee7nBU9fhnmknOqDPaJAMtfK0ImQ=
+github.com/docker/docker v28.3.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
 github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
 github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
@@ -284,22 +1015,22 @@ github.com/emersion/go-smtp v0.21.3 h1:7uVwagE8iPYE48WhNsng3RRpCUpFvNl39JGNSIyGV
 github.com/emersion/go-smtp v0.21.3/go.mod h1:qm27SGYgoIPRot6ubfQ/GpiPy/g3PaZAVRxiO/sDUgQ=
 github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
 github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
 github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
+github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34=
+github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI=
 github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M=
 github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA=
+github.com/envoyproxy/go-control-plane/envoy v1.32.2/go.mod h1:eR2SOX2IedqlPvmiKjUH7Wu//S602JKI7HPC/L3SRq8=
 github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
 github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
 github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI=
 github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
+github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w=
+github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
+github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
+github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
 github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
 github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM=
@@ -314,6 +1045,8 @@ github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
 github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
 github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
 github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
 github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
@@ -323,24 +1056,32 @@ github.com/fullstorydev/emulators/storage v0.0.0-20240401123056-edc69752f474/go.
 github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
 github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-chi/chi/v5 v5.2.2 h1:CMwsvRVTbXVytCk1Wd72Zy1LAsAh9GxMmSNWLHCG618=
+github.com/go-chi/chi/v5 v5.2.2/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
+github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
+github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
+github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
+github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
+github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
 github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
-github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE=
-github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
 github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
 github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
+github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
 github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
 github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
 github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
@@ -370,17 +1111,19 @@ github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZ
 github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0=
 github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58=
 github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ=
+github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
+github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
 github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
 github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
 github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
 github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
 github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
 github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
-github.com/go-resty/resty/v2 v2.16.3 h1:zacNT7lt4b8M/io2Ahj6yPypL7bqx9n1iprfQuodV+E=
-github.com/go-resty/resty/v2 v2.16.3/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA=
+github.com/go-resty/resty/v2 v2.16.5 h1:hBKqmWrr7uRc3euHVqmh1HTHcKn99Smr7o5spptdhTM=
+github.com/go-resty/resty/v2 v2.16.5/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-viper/mapstructure/v2 v2.3.0 h1:27XbWsHIqhbdR5TIC911OfYvgSaW93HM+dX7970Q7jk=
-github.com/go-viper/mapstructure/v2 v2.3.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
+github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
 github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I=
 github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
 github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
@@ -388,6 +1131,7 @@ github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJA
 github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
 github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
 github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
+github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
 github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
 github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
 github.com/goccy/go-yaml v1.9.5/go.mod h1:U/jl18uSupI5rdI2jmuCswEA2htH9eXfferR3KfscvA=
@@ -408,14 +1152,17 @@ github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeD
 github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
 github.com/golang-migrate/migrate/v4 v4.18.1 h1:JML/k+t4tpHCpQTCAD62Nu43NUFzHY4CV3uAuvHGC+Y=
 github.com/golang-migrate/migrate/v4 v4.18.1/go.mod h1:HAX6m3sQgcdO81tdjn5exv20+3Kb13cmGli1hrD6hks=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
+github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
+github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
 github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
 github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
 github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
@@ -441,6 +1188,7 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
 github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
 github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -451,6 +1199,7 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
 github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
 github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
 github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -468,6 +1217,7 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
 github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
 github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
@@ -478,6 +1228,7 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi
 github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
 github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
 github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
 github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc=
 github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
@@ -499,11 +1250,20 @@ github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0Z
 github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a h1://KbezygeMJZCSHH+HgUZiTeSoiuFspbMg1ge+eFj18=
 github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM=
+github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
+github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
 github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
 github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
+github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
+github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
 github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4=
 github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
@@ -513,11 +1273,19 @@ github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0
 github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
 github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
 github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
-github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q=
-github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA=
+github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo=
+github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY=
+github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
+github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
+github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
+github.com/googleapis/gax-go/v2 v2.10.0/go.mod h1:4UOEnMCrxsSqQ940WnTiD6qJ63le2ev3xfyagutxiPw=
+github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI=
+github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0=
+github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w=
+github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
 github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
-github.com/gophercloud/gophercloud/v2 v2.6.0 h1:XJKQ0in3iHOZHVAFMXq/OhjCuvvG+BKR0unOqRfG1EI=
-github.com/gophercloud/gophercloud/v2 v2.6.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
+github.com/gophercloud/gophercloud/v2 v2.7.0 h1:o0m4kgVcPgHlcXiWAjoVxGd8QCmvM5VU+YM71pFbn0E=
+github.com/gophercloud/gophercloud/v2 v2.7.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
 github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
 github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
 github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
@@ -532,11 +1300,14 @@ github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 h1:sGm2vDRFUrQJO/Veii4h4z
 github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2/go.mod h1:wd1YpapPLivG6nQgbf7ZkG1hhSOXDhhn4MLTknx2aAc=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
 github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0=
-github.com/hashicorp/consul/api v1.31.2 h1:NicObVJHcCmyOIl7Z9iHPvvFrocgTYo9cITSGg0/7pw=
-github.com/hashicorp/consul/api v1.31.2/go.mod h1:Z8YgY0eVPukT/17ejW+l+C7zJmKwgPHtjU1q16v/Y40=
+github.com/hashicorp/consul/api v1.32.0 h1:5wp5u780Gri7c4OedGEPzmlUEzi0g2KyiPphSr6zjVg=
+github.com/hashicorp/consul/api v1.32.0/go.mod h1:Z8YgY0eVPukT/17ejW+l+C7zJmKwgPHtjU1q16v/Y40=
 github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
 github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
 github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=
@@ -556,6 +1327,8 @@ github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVH
 github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
 github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
 github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-metrics v0.5.4 h1:8mmPiIJkTPPEbAiV97IxdAGNdRdaWwVap1BU6elejKY=
+github.com/hashicorp/go-metrics v0.5.4/go.mod h1:CG5yz4NZ/AI/aQt9Ucm/vdBnbh7fvmv4lxZ350i+QQI=
 github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
 github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
 github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
@@ -594,12 +1367,14 @@ github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpT
 github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
 github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
 github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
-github.com/hetznercloud/hcloud-go/v2 v2.19.1 h1:UU/7h3uc/rdgspM8xkQF7wokmwZXePWDXcLqrQRRzzY=
-github.com/hetznercloud/hcloud-go/v2 v2.19.1/go.mod h1:r5RTzv+qi8IbLcDIskTzxkFIji7Ovc8yNgepQR9M+UA=
+github.com/hetznercloud/hcloud-go/v2 v2.21.1 h1:IH3liW8/cCRjfJ4cyqYvw3s1ek+KWP8dl1roa0lD8JM=
+github.com/hetznercloud/hcloud-go/v2 v2.21.1/go.mod h1:XOaYycZJ3XKMVWzmqQ24/+1V7ormJHmPdck/kxrNnQA=
 github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
 github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
 github.com/huaweicloud/huaweicloud-sdk-go-obs v3.25.4+incompatible h1:yNjwdvn9fwuN6Ouxr0xHM0cVu03YMUWUyFmu2van/Yc=
 github.com/huaweicloud/huaweicloud-sdk-go-obs v3.25.4+incompatible/go.mod h1:l7VUhRbTKCzdOacdT4oWCwATKyvZqUOlOqr0Ous3k4s=
+github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
+github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
@@ -626,13 +1401,19 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
 github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
 github.com/julienschmidt/httprouter v1.3.0/go.mod
h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU= github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= +github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/knadh/koanf/maps v0.1.2 h1:RBfmAW5CnZT+PJ1CVc1QSJKf4Xu9kxfQgYVQSu8hpbo= @@ -649,6 +1430,7 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -670,8 +1452,13 @@ github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353/go.mod h1:N0SVk0uhy github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/linode/linodego v1.47.0 h1:6MFNCyzWbr8Rhl4r7d5DwZLwxvFIsM4ARH6W0KS/R0U= -github.com/linode/linodego v1.47.0/go.mod h1:vyklQRzZUWhFVBZdYx4dcYJU/gG9yKB9VUcUs6ub0Lk= +github.com/linode/linodego v1.52.2 h1:N9ozU27To1LMSrDd8WvJZ5STSz1eGYdyLnxhAR/dIZg= +github.com/linode/linodego v1.52.2/go.mod h1:bI949fZaVchjWyKIA08hNyvAcV6BAS+PM2op3p7PAWA= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= +github.com/lyft/protoc-gen-star/v2 v2.0.3/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk= +github.com/lyft/protoc-gen-star/v2 v2.0.4-0.20230330145011-496ad1ac90a4/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk= github.com/magiconair/properties v1.8.6/go.mod 
h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -693,8 +1480,12 @@ github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcME github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= @@ -708,10 +1499,14 @@ github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKju github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE= github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE= +github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= +github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= +github.com/minio/crc64nvme v1.0.1 h1:DHQPrYPdqK7jQG/Ls5CTBZWeex/2FMS3G5XGkycuFrY= +github.com/minio/crc64nvme v1.0.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.80 h1:2mdUHXEykRdY/BigLt3Iuu1otL0JTogT0Nmltg0wujk= -github.com/minio/minio-go/v7 v7.0.80/go.mod h1:84gmIilaX4zcvAWWzJ5Z1WI5axN+hAbM5w25xf8xvC0= +github.com/minio/minio-go/v7 v7.0.93 h1:lAB4QJp8Nq3vDMOU0eKgMuyBiEGMNlXQ5Glc8qAxqSU= +github.com/minio/minio-go/v7 v7.0.93/go.mod h1:71t2CqDt3ThzESgZUlU1rBN54mksGGlkLcFgguDnnAc= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= @@ -754,8 +1549,8 @@ github.com/ncw/swift v1.0.53/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= -github.com/oklog/run 
v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/oklog/run v1.2.0 h1:O8x3yXwah4A73hJdlrwo/2X6J62gE5qTMusH0dvz60E= +github.com/oklog/run v1.2.0/go.mod h1:mgDbKRSwPhJfesJ4PntqFUbKQRZ50NgmZTSPlFA0YFk= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/oklog/ulid/v2 v2.1.1 h1:suPZ4ARWLOJLegGFiZZ1dFAkqzhMjL3J1TzI+5wHz8s= @@ -764,14 +1559,14 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.128.0 h1:hZa4FkI2JhYC0tkiwOepnHyyfWzezz3FfCmt88nWJa0= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.128.0/go.mod h1:sLbOuJEFckPdw4li0RtWpoSsMeppcck3s/cmzPyKAgc= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.128.0 h1:+rUULr4xqOJjZK3SokFmRYzsiPq5onoWoSv3He4aaus= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.128.0/go.mod h1:Fh2SXPeFkr4J97w9CV/apFAib8TC9Hi0P08xtiT7Lng= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.128.0 h1:8OWwRSdIhm3DY3PEYJ0PtSEz1a1OjL0fghLXSr14JMk= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.128.0/go.mod h1:32OeaysZe4vkSmD1LJ18Q1DfooryYqpSzFNmz+5A5RU= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.128.0 h1:9wVFaWEhgV8WQD+nP662nHNaQIkmyF57KRhtsqlaWEI= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.128.0/go.mod h1:Yak3vQIvwYQiAO83u+zD9ujdCmpcDL7JSfg2YK+Mwn4= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.129.0 h1:2pzb6bC/AAfciC9DN+8d7Y8Rsk8ZPCfp/ACTfZu87FQ= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.129.0/go.mod h1:tIE4dzdxuM7HnFeYA6sj5zfLuUA/JxzQ+UDl1YrHvQw= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.129.0 h1:ydkfqpZ5BWZfEJEs7OUhTHW59og5aZspbUYxoGcAEok= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.129.0/go.mod h1:oA+49dkzmhUx0YFC9JXGuPPSBL0TOTp6jkv7qSr2n0Q= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.129.0 h1:AOVxBvCZfTPj0GLGqBVHpAnlC9t9pl1JXUQXymHliiY= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.129.0/go.mod h1:0CAJ32V/bCUBhNTEvnN9wlOG5IsyZ+Bmhe9e3Eri7CU= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.129.0 h1:yDLSAoIi3jNt4R/5xN4IJ9YAg1rhOShgchlO/ESv8EY= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.129.0/go.mod h1:IXQHbTPxqNcuu44FvkyvpYJ6Qy4wh4YsCVkKsp0Flzo= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= @@ -786,8 +1581,8 @@ github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYr github.com/oracle/oci-go-sdk/v65 v65.93.1 h1:lIvy/6aQOUenQI+cxXH1wDBJeXFPO9Du3CaomXeYFaY= 
github.com/oracle/oci-go-sdk/v65 v65.93.1/go.mod h1:u6XRPsw9tPziBh76K7GrrRXPa8P8W3BQeqJ6ZZt9VLA= github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= -github.com/ovh/go-ovh v1.7.0 h1:V14nF7FwDjQrZt9g7jzcvAAQ3HN6DNShRFRMC3jLoPw= -github.com/ovh/go-ovh v1.7.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= +github.com/ovh/go-ovh v1.9.0 h1:6K8VoL3BYjVV3In9tPJUdT7qMx9h0GExN9EXx1r2kKE= +github.com/ovh/go-ovh v1.9.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= github.com/parquet-go/parquet-go v0.25.1 h1:l7jJwNM0xrk0cnIIptWMtnSnuxRkwq53S+Po3KG8Xgo= github.com/parquet-go/parquet-go v0.25.1/go.mod h1:AXBuotO1XiBtcqJb/FKFyjBG4aqa3aQAAWF3ZPzCanY= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -796,14 +1591,22 @@ github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144T github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= +github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY= +github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= +github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= +github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= @@ -814,8 +1617,8 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus-community/parquet-common v0.0.0-20250716185251-4cfa597e936c 
h1:yDtT3c2klcWJj6A0osq72qM8rd1ohtl/J3rHD3FHuNw= -github.com/prometheus-community/parquet-common v0.0.0-20250716185251-4cfa597e936c/go.mod h1:MbAv/yCv9GORLj0XvXgRF913R9Jc04+BvVq4VJpPCi0= +github.com/prometheus-community/parquet-common v0.0.0-20250827225610-65f0b68d35e6 h1:jWcDrCpAU047f2NTGtm3vRPqJ8skDOkdKCC5sSfSN4Q= +github.com/prometheus-community/parquet-common v0.0.0-20250827225610-65f0b68d35e6/go.mod h1:MbAv/yCv9GORLj0XvXgRF913R9Jc04+BvVq4VJpPCi0= github.com/prometheus-community/prom-label-proxy v0.11.1 h1:jX+m+BQCNM0z3/P6V6jVxbiDKgugvk91SaICD6bVhT4= github.com/prometheus-community/prom-label-proxy v0.11.1/go.mod h1:uTeQW+wZ/VPV1LL3IPfvUE++wR2nPLex+Y4RE38Cpis= github.com/prometheus/alertmanager v0.28.1 h1:BK5pCoAtaKg01BYRUJhEDV1tqJMEtYBGzPw8QdvnnvA= @@ -826,22 +1629,29 @@ github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= -github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_golang v1.23.0-rc.1 h1:Is/nGODd8OsJlNQSybeYBwY/B6aHrN7+QwVUYutHSgw= +github.com/prometheus/client_golang v1.23.0-rc.1/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= +github.com/prometheus/client_golang/exp v0.0.0-20250914183048-a974e0d45e0a h1:RF1vfKM34/3DbGNis22BGd6sDDY3XBi0eM7pYqmOEO0= +github.com/prometheus/client_golang/exp v0.0.0-20250914183048-a974e0d45e0a/go.mod h1:FGJuwvfcPY0V5enm+w8zF1RNS062yugQtPPQp1c4Io4= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k= -github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18= +github.com/prometheus/common v0.65.1-0.20250703115700-7f8b2a0d32d3 h1:R/zO7ombSHCI8bjQusgCMSL+cE669w5/R2upq5WlPD0= +github.com/prometheus/common v0.65.1-0.20250703115700-7f8b2a0d32d3/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= 
github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg= github.com/prometheus/exporter-toolkit v0.14.0/go.mod h1:Gu5LnVvt7Nr/oqTBUC23WILZepW0nffNo10XdhQcwWA= +github.com/prometheus/otlptranslator v0.0.0-20250731173911-a9673827589a h1:r2csuCATbgDz2Nk2PkKo7b6x7ErrF3NMmxwH0fifqN8= +github.com/prometheus/otlptranslator v0.0.0-20250731173911-a9673827589a/go.mod h1:P8AwMgdD7XEr6QRUJ2QWLpiAZTgTE2UYgjlu3svompI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= @@ -849,27 +1659,38 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= -github.com/prometheus/sigv4 v0.1.2 h1:R7570f8AoM5YnTUPFm3mjZH5q2k4D+I/phCWvZ4PXG8= -github.com/prometheus/sigv4 v0.1.2/go.mod h1:GF9fwrvLgkQwDdQ5BXeV9XUSCH/IPNqzvAoaohfjqMU= +github.com/prometheus/prometheus v0.305.1-0.20250808023455-1e4144a496fb h1:azXJoaVT+S7PRdbdUwtyivhaGq++ZF5YTkk1XlTaZkw= +github.com/prometheus/prometheus v0.305.1-0.20250808023455-1e4144a496fb/go.mod h1:nFT/lsJGZPCe1mC6uLIoDuK2bP9JO9DBHIDPQsuZucQ= +github.com/prometheus/sigv4 v0.2.0 h1:qDFKnHYFswJxdzGeRP63c4HlH3Vbn1Yf/Ao2zabtVXk= +github.com/prometheus/sigv4 v0.2.0/go.mod h1:D04rqmAaPPEUkjRQxGqjoxdyJuyCh6E0M18fZr0zBiE= github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg= github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA= +github.com/rantav/go-grpc-channelz v0.0.4 h1:8GvqhA6siQVBsZYzal3yHhyJ9YiHEJx7RtSH2Jvm9Co= +github.com/rantav/go-grpc-channelz v0.0.4/go.mod h1:HodrRmnnH1zXcEEfK7EJrI23YMPMT7uvyAYkq2JUIcI= github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI= github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw= github.com/redis/rueidis v1.0.61 h1:AkbCMeTyjFSQraGaNYncg3unMCTYGr6Y8WOqGhDOQu4= github.com/redis/rueidis v1.0.61/go.mod h1:Lkhr2QTgcoYBhxARU7kJRO8SyVlgUuEkcJO1Y8MCluA= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= github.com/rs/cors v1.11.1/go.mod 
h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5AxSgDyEQcea8= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.32 h1:4+LP7qmsLSGbmc66m1s5dKRMBwztRppfxFKlYqYte/c= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.32/go.mod h1:kzh+BSAvpoyHHdHBCDhmSWtBc1NbLMZ2lWHqnBoxFks= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33 h1:KhF0WejiUTDbL5X55nXowP7zNopwpowa6qaMAWyIE+0= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33/go.mod h1:792k1RTU+5JeMXm35/e2Wgp71qPH/DmDoZrRc+EFZDk= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/segmentio/fasthash v1.0.3 h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtrmhM= @@ -891,8 +1712,11 @@ github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/sony/gobreaker v1.0.0 h1:feX5fGGXSl3dYd4aHZItw+FpHLvvoaqkawKjVNiFMNQ= github.com/sony/gobreaker v1.0.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= @@ -900,8 +1724,8 @@ github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.13.0/go.mod h1:Icm2xNL3/8uyh/wFuB1jI7TiTNKp8632Nwegu+zgdYw= -github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= -github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= +github.com/stackitcloud/stackit-sdk-go/core v0.17.2 h1:jPyn+i8rkp2hM80+hOg0B/1EVRbMt778Tr5RWyK1m2E= +github.com/stackitcloud/stackit-sdk-go/core v0.17.2/go.mod h1:8KIw3czdNJ9sdil9QQimxjR6vHjeINFrRv0iZ67wfn0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -919,6 +1743,7 @@ 
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= @@ -928,14 +1753,14 @@ github.com/tencentyun/cos-go-sdk-v5 v0.7.66 h1:O4O6EsozBoDjxWbltr3iULgkI7WPj/BFN github.com/tencentyun/cos-go-sdk-v5 v0.7.66/go.mod h1:8+hG+mQMuRP/OIS9d83syAvXvrMj9HhkND6Q1fLghw0= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e h1:f1Zsv7OAU9iQhZwigp50Yl38W10g/vd5NC8Rdk1Jzng= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e/go.mod h1:jXcofnrSln/cLI6/dhlBxPQZEEQHVPCcFaH75M+nSzM= -github.com/thanos-io/objstore v0.0.0-20241111205755-d1dd89d41f97 h1:VjG0mwhN1DkncwDHFvrpd12/2TLfgYNRmEQA48ikp+0= -github.com/thanos-io/objstore v0.0.0-20241111205755-d1dd89d41f97/go.mod h1:vyzFrBXgP+fGNG2FopEGWOO/zrIuoy7zt3LpLeezRsw= -github.com/thanos-io/promql-engine v0.0.0-20250611170940-015ebeb7b5ff h1:obQDLbgnae6rLPngWwQ6q/ifQZeDEmVvxHIJ6arJCDs= -github.com/thanos-io/promql-engine v0.0.0-20250611170940-015ebeb7b5ff/go.mod h1:IQjuIvDzOOVE2MGDs88Q65GYmmKrpmIsDkMVOqs5reo= -github.com/thanos-io/thanos v0.39.2 h1:edN03y7giEc6lD17HJhYcv8ELapXxElmhJnFIYJ2GqQ= -github.com/thanos-io/thanos v0.39.2/go.mod h1:bvUPJNIx2LBXme6yBinRiGqQinxlGikLlK7PGeFQPkQ= -github.com/thanos-io/thanos-prometheus v0.0.0-20250610133519-082594458a88 h1:5uf08MPb6xrVo4rxmBDh9/1SLthbZGY9zLeF3oMixh8= -github.com/thanos-io/thanos-prometheus v0.0.0-20250610133519-082594458a88/go.mod h1:WEq2ogBPZoLjj9x5K67VEk7ECR0nRD9XCjaOt1lsYck= +github.com/thanos-io/objstore v0.0.0-20250722142242-922b22272ee3 h1:P301Anc27aVL7Ls88el92j+qW3PJp8zmiDl+kOUZv3A= +github.com/thanos-io/objstore v0.0.0-20250722142242-922b22272ee3/go.mod h1:uDHLkMKOGDAnlN75EAz8VrRzob1+VbgYSuUleatWuF0= +github.com/thanos-io/promql-engine v0.0.0-20250924193140-e9123dc11264 h1:sOmANo4XVhem4VgvI9w05DBwqMex/qw+cDjuHW2FKWw= +github.com/thanos-io/promql-engine v0.0.0-20250924193140-e9123dc11264/go.mod h1:MOFN0M1nDMcWZg1t4iF39sOard/K4SWgO/HHSODeDIc= +github.com/thanos-io/thanos v0.39.3-0.20250729120336-88d0ae8071cb h1:z/ePbn3lo/D4vdHGH8hpa2kgH9M6iLq0kOFtZwuelKM= +github.com/thanos-io/thanos v0.39.3-0.20250729120336-88d0ae8071cb/go.mod h1:gGUG3TDEoRSjTFVs/QO6QnQIILRgNF0P9l7BiiMfmHw= +github.com/tinylib/msgp v1.3.0 h1:ULuf7GPooDaIlbyvgAxBV/FI7ynli6LZ1/nVUNu+0ww= +github.com/tinylib/msgp v1.3.0/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= github.com/tjhop/slog-gokit v0.1.4 h1:uj/vbDt3HaF0Py8bHPV4ti/s0utnO0miRbO277FLBKM= github.com/tjhop/slog-gokit v0.1.4/go.mod h1:Bbu5v2748qpAWH7k6gse/kw3076IJf6owJmh7yArmJs= github.com/trivago/tgo v1.0.7 h1:uaWH/XIy9aWYWpjm2CU3RpcqZXmX2ysQ9/Go+d9gyrM= @@ -962,10 +1787,12 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod 
h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= -github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= -github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= +github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= go.etcd.io/etcd/api/v3 v3.5.17 h1:cQB8eb8bxwuxOilBpMJAEo8fAONyrdXTHUNcMd8yT1w= go.etcd.io/etcd/api/v3 v3.5.17/go.mod h1:d1hvkRuXkts6PmaYk2Vrgqbv7H4ADfAKhyJqHNLJCB4= @@ -989,44 +1816,45 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/collector/component v1.34.0 h1:YONg7FaZ5zZbj5cLdARvwtMNuZHunuyxw2fWe5fcWqc= -go.opentelemetry.io/collector/component v1.34.0/go.mod h1:GvolsSVZskXuyfQdwYacqeBSZe/1tg4RJ0YK55KSvDA= -go.opentelemetry.io/collector/component/componentstatus v0.128.0 h1:0lEYHgUQEMMkl5FLtMgDH8lue4B3auElQINzGIWUya4= -go.opentelemetry.io/collector/component/componentstatus v0.128.0/go.mod h1:8vVO6JSV+edmiezJsQzW7aKQ7sFLIN6S3JawKBI646o= -go.opentelemetry.io/collector/component/componenttest v0.128.0 h1:MGNh5lQQ0Qmz2SmNwOqLJYaWMDkMLYj/51wjMzTBR34= -go.opentelemetry.io/collector/component/componenttest v0.128.0/go.mod h1:hALNxcacqOaX/Gm/dE7sNOxAEFj41SbRqtvF57Yd6gs= -go.opentelemetry.io/collector/confmap v1.34.0 h1:PG4sYlLxgCMnA5F7daKXZV+NKjU1IzXBzVQeyvcwyh0= -go.opentelemetry.io/collector/confmap v1.34.0/go.mod h1:BbAit8+hAJg5vyFBQoDh9vOXOH8UzCdNu91jCh+b72E= -go.opentelemetry.io/collector/confmap/xconfmap v0.128.0 h1:hcVKU45pjC+PLz7xUc8kwSlR5wsN2w8hs9midZ3ez10= -go.opentelemetry.io/collector/confmap/xconfmap v0.128.0/go.mod h1:2928x4NAAu1CysfzLbEJE6MSSDB/gOYVq6YRGWY9LmM= -go.opentelemetry.io/collector/consumer v1.34.0 h1:oBhHH6mgViOGhVDPozE+sUdt7jFBo2Hh32lsSr2L3Tc= -go.opentelemetry.io/collector/consumer v1.34.0/go.mod h1:DVMCb56ZBlPNcmo0lSJKn3rp18oyZQCedRE4GKIMI+Q= -go.opentelemetry.io/collector/consumer/consumertest v0.128.0 h1:x50GB0I/QvU3sQuNCap5z/P2cnq2yHoRJ/8awkiT87w= -go.opentelemetry.io/collector/consumer/consumertest v0.128.0/go.mod h1:Wb3IAbMY/DOIwJPy81PuBiW2GnKoNIz4THE7wfJwovE= -go.opentelemetry.io/collector/consumer/xconsumer v0.128.0 h1:4E+KTdCjkRS3SIw0bsv5kpv9XFXHf8x9YiPEuxBVEHY= -go.opentelemetry.io/collector/consumer/xconsumer v0.128.0/go.mod h1:OmzilL/qbjCzPMHay+WEA7/cPe5xuX7Jbj5WPIpqaMo= -go.opentelemetry.io/collector/featuregate v1.34.0 h1:zqDHpEYy1UeudrfUCvlcJL2t13dXywrC6lwpNZ5DrCU= -go.opentelemetry.io/collector/featuregate v1.34.0/go.mod h1:Y/KsHbvREENKvvN9RlpiWk/IGBK+CATBYzIIpU7nccc= -go.opentelemetry.io/collector/internal/telemetry v0.128.0 h1:ySEYWoY7J8DAYdlw2xlF0w+ODQi3AhYj7TRNflsCbx8= -go.opentelemetry.io/collector/internal/telemetry v0.128.0/go.mod h1:572B/iJqjauv3aT+zcwnlNWBPqM7+KqrYGSUuOAStrM= -go.opentelemetry.io/collector/pdata v1.34.0 
h1:2vwYftckXe7pWxI9mfSo+tw3wqdGNrYpMbDx/5q6rw8= -go.opentelemetry.io/collector/pdata v1.34.0/go.mod h1:StPHMFkhLBellRWrULq0DNjv4znCDJZP6La4UuC+JHI= -go.opentelemetry.io/collector/pdata/pprofile v0.128.0 h1:6DEtzs/liqv/ukz2EHbC5OMaj2V6K2pzuj/LaRg2YmY= -go.opentelemetry.io/collector/pdata/pprofile v0.128.0/go.mod h1:bVVRpz+zKFf1UCCRUFqy8LvnO3tHlXKkdqW2d+Wi/iA= -go.opentelemetry.io/collector/pdata/testdata v0.128.0 h1:5xcsMtyzvb18AnS2skVtWreQP1nl6G3PiXaylKCZ6pA= -go.opentelemetry.io/collector/pdata/testdata v0.128.0/go.mod h1:9/VYVgzv3JMuIyo19KsT3FwkVyxbh3Eg5QlabQEUczA= -go.opentelemetry.io/collector/pipeline v0.128.0 h1:WgNXdFbyf/QRLy5XbO/jtPQosWrSWX/TEnSYpJq8bgI= -go.opentelemetry.io/collector/pipeline v0.128.0/go.mod h1:TO02zju/K6E+oFIOdi372Wk0MXd+Szy72zcTsFQwXl4= -go.opentelemetry.io/collector/processor v1.34.0 h1:5pwXIG12XXxdkJ8F68e2cBEjEnFlCIAZhqEYM7vjkqE= -go.opentelemetry.io/collector/processor v1.34.0/go.mod h1:VCl4vYj2tdO4APUcr0q6Eh796mqCCsH9Z/gqaPuzlUs= -go.opentelemetry.io/collector/processor/processortest v0.128.0 h1:xPhOSmGFDGqhC3/nu1BqPSE6EpDPAf1/F+BfaYjDn/8= -go.opentelemetry.io/collector/processor/processortest v0.128.0/go.mod h1:XXXom+mbAQtrkcvq4Ecd6n8RQoVgcfLe1vrUlr6U2gI= -go.opentelemetry.io/collector/processor/xprocessor v0.128.0 h1:ObbtdXab0is6bdt4XabsRJZ+SUTuwQjPVlHTbmScfNg= -go.opentelemetry.io/collector/processor/xprocessor v0.128.0/go.mod h1:/nHXW15nzwSRQ+25Cb+r17he/uMtCEvSOBGqpDbn3Uk= +go.opentelemetry.io/collector/component v1.35.0 h1:JpvBukEcEUvJ/TInF1KYpXtWEP+C7iYkxCHKjI0o7BQ= +go.opentelemetry.io/collector/component v1.35.0/go.mod h1:hU/ieWPxWbMAacODCSqem5ZaN6QH9W5GWiZ3MtXVuwc= +go.opentelemetry.io/collector/component/componentstatus v0.129.0 h1:ejpBAt7hXAAZiQKcSxLvcy8sj8SjY4HOLdoXIlW6ybw= +go.opentelemetry.io/collector/component/componentstatus v0.129.0/go.mod h1:/dLPIxn/tRMWmGi+DPtuFoBsffOLqPpSZ2IpEQzYtwI= +go.opentelemetry.io/collector/component/componenttest v0.129.0 h1:gpKkZGCRPu3Yn0U2co09bMvhs17yLFb59oV8Gl9mmRI= +go.opentelemetry.io/collector/component/componenttest v0.129.0/go.mod h1:JR9k34Qvd/pap6sYkPr5QqdHpTn66A5lYeYwhenKBAM= +go.opentelemetry.io/collector/confmap v1.35.0 h1:U4JDATAl4PrKWe9bGHbZkoQXmJXefWgR2DIkFvw8ULQ= +go.opentelemetry.io/collector/confmap v1.35.0/go.mod h1:qX37ExVBa+WU4jWWJCZc7IJ+uBjb58/9oL+/ctF1Bt0= +go.opentelemetry.io/collector/confmap/xconfmap v0.129.0 h1:Q/+pJKrkCaMPSoSAH2BpC3UZCh+5hTiFkh/bdy5yChk= +go.opentelemetry.io/collector/confmap/xconfmap v0.129.0/go.mod h1:RNMnlay2meJDXcKjxiLbST9/YAhKLJlj0kZCrJrLGgw= +go.opentelemetry.io/collector/consumer v1.35.0 h1:mgS42yh1maXBIE65IT4//iOA89BE+7xSUzV8czyevHg= +go.opentelemetry.io/collector/consumer v1.35.0/go.mod h1:9sSPX0hDHaHqzR2uSmfLOuFK9v3e9K3HRQ+fydAjOWs= +go.opentelemetry.io/collector/consumer/consumertest v0.129.0 h1:kRmrAgVvPxH5c/rTaOYAzyy0YrrYhQpBNkuqtDRrgeU= +go.opentelemetry.io/collector/consumer/consumertest v0.129.0/go.mod h1:JgJKms1+v/CuAjkPH+ceTnKeDgUUGTQV4snGu5wTEHY= +go.opentelemetry.io/collector/consumer/xconsumer v0.129.0 h1:bRyJ9TGWwnrUnB5oQGTjPhxpVRbkIVeugmvks22bJ4A= +go.opentelemetry.io/collector/consumer/xconsumer v0.129.0/go.mod h1:pbe5ZyPJrtzdt/RRI0LqfT1GVBiJLbtkDKx3SBRTiTY= +go.opentelemetry.io/collector/featuregate v1.35.0 h1:c/XRtA35odgxVc4VgOF/PTIk7ajw1wYdQ6QI562gzd4= +go.opentelemetry.io/collector/featuregate v1.35.0/go.mod h1:Y/KsHbvREENKvvN9RlpiWk/IGBK+CATBYzIIpU7nccc= +go.opentelemetry.io/collector/internal/telemetry v0.129.0 h1:jkzRpIyMxMGdAzVOcBe8aRNrbP7eUrMq6cxEHe0sbzA= +go.opentelemetry.io/collector/internal/telemetry v0.129.0/go.mod 
h1:riAPlR2LZBV7VEx4LicOKebg3N1Ja3izzkv5fl1Lhiw= +go.opentelemetry.io/collector/pdata v1.35.0 h1:ck6WO6hCNjepADY/p9sT9/rLECTLO5ukYTumKzsqB/E= +go.opentelemetry.io/collector/pdata v1.35.0/go.mod h1:pttpb089864qG1k0DMeXLgwwTFLk+o3fAW9I6MF9tzw= +go.opentelemetry.io/collector/pdata/pprofile v0.129.0 h1:DgZTvjOGmyZRx7Or80hz8XbEaGwHPkIh2SX1A5eXttQ= +go.opentelemetry.io/collector/pdata/pprofile v0.129.0/go.mod h1:uUBZxqJNOk6QIMvbx30qom//uD4hXJ1K/l3qysijMLE= +go.opentelemetry.io/collector/pdata/testdata v0.129.0 h1:n1QLnLOtrcAR57oMSVzmtPsQEpCc/nE5Avk1xfuAkjY= +go.opentelemetry.io/collector/pdata/testdata v0.129.0/go.mod h1:RfY5IKpmcvkS2IGVjl9jG9fcT7xpQEBWpg9sQOn/7mY= +go.opentelemetry.io/collector/pipeline v0.129.0 h1:Mp7RuKLizLQJ0381eJqKQ0zpgkFlhTE9cHidpJQIvMU= +go.opentelemetry.io/collector/pipeline v0.129.0/go.mod h1:TO02zju/K6E+oFIOdi372Wk0MXd+Szy72zcTsFQwXl4= +go.opentelemetry.io/collector/processor v1.35.0 h1:YOfHemhhodYn4BnPjN7kWYYDhzPVqRkyHCaQ8mAlavs= +go.opentelemetry.io/collector/processor v1.35.0/go.mod h1:cWHDOpmpAaVNCc9K9j2/okZoLIuP/EpGGRNhM4JGmFM= +go.opentelemetry.io/collector/processor/processortest v0.129.0 h1:r5iJHdS7Ffdb2zmMVYx4ahe92PLrce5cas/AJEXivkY= +go.opentelemetry.io/collector/processor/processortest v0.129.0/go.mod h1:gdf8GzyzjGoDTA11+CPwC4jfXphtC+B7MWbWn+LIWXc= +go.opentelemetry.io/collector/processor/xprocessor v0.129.0 h1:V3Zgd+YIeu3Ij3DPlGtzdcTwpqOQIqQVcL5jdHHS7sc= +go.opentelemetry.io/collector/processor/xprocessor v0.129.0/go.mod h1:78T+AP5NO137W/E+SibQhaqOyS67fR+IN697b4JFh00= go.opentelemetry.io/collector/semconv v0.128.0 h1:MzYOz7Vgb3Kf5D7b49pqqgeUhEmOCuT10bIXb/Cc+k4= go.opentelemetry.io/collector/semconv v0.128.0/go.mod h1:OPXer4l43X23cnjLXIZnRj/qQOjSuq4TgBLI76P9hns= go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 h1:u2E32P7j1a/gRgZDWhIXC+Shd4rLg70mnE7QLI/Ssnw= go.opentelemetry.io/contrib/bridges/otelzap v0.11.0/go.mod h1:pJPCLM8gzX4ASqLlyAXjHBEYxgbOQJ/9bidWxD6PEPQ= +go.opentelemetry.io/contrib/detectors/gcp v1.34.0/go.mod h1:cV4BMFcscUR/ckqLkbfQmF0PRsq8w/lMGzdbCSveBHo= go.opentelemetry.io/contrib/detectors/gcp v1.35.0 h1:bGvFt68+KTiAKFlacHW6AhA56GF2rS0bdD3aJYEnmzA= go.opentelemetry.io/contrib/detectors/gcp v1.35.0/go.mod h1:qGWP8/+ILwMRIUf9uIVLloR1uo5ZYAslM4O6OqUi1DA= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= @@ -1045,6 +1873,8 @@ go.opentelemetry.io/contrib/propagators/jaeger v1.36.0 h1:SoCgXYF4ISDtNyfLUzsGDa go.opentelemetry.io/contrib/propagators/jaeger v1.36.0/go.mod h1:VHu48l0YTRKSObdPQ+Sb8xMZvdnJlN7yhHuHoPgNqHM= go.opentelemetry.io/contrib/propagators/ot v1.36.0 h1:UBoZjbx483GslNKYK2YpfvePTJV4BHGeFd8+b7dexiM= go.opentelemetry.io/contrib/propagators/ot v1.36.0/go.mod h1:adDDRry19/n9WoA7mSCMjoVJcmzK/bZYzX9SR+g2+W4= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= go.opentelemetry.io/otel/bridge/opentracing v1.36.0 h1:GWGmcYhMCu6+K/Yz5KWSETU/esd/mkVGx+77uKtLjpk= @@ -1059,15 +1889,24 @@ go.opentelemetry.io/otel/log v0.12.2 h1:yob9JVHn2ZY24byZeaXpTVoPS6l+UrrxmxmPKohX go.opentelemetry.io/otel/log v0.12.2/go.mod h1:ShIItIxSYxufUMt+1H5a2wbckGli3/iCfuEbVZi/98E= go.opentelemetry.io/otel/log/logtest v0.0.0-20250526142609-aa5bd0e64989 h1:4JF7oY9CcHrPGfBLijDcXZyCzGckVEyOjuat5ktmQRg= 
go.opentelemetry.io/otel/log/logtest v0.0.0-20250526142609-aa5bd0e64989/go.mod h1:NToOxLDCS1tXDSB2dIj44H9xGPOpKr0csIN+gnuihv4= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -1094,29 +1933,60 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto 
v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 h1:bsqhLWFR6G6xiQcb+JoGqdKdRU6WzPWmK8E0jxTjzo4= golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/image 
v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -1139,10 +2009,20 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1182,6 +2062,8 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod 
h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -1189,6 +2071,30 @@ golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1211,9 +2117,19 @@ golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1225,10 +2141,20 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= 
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1276,6 +2202,7 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1291,10 +2218,12 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1308,14 +2237,55 @@ golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= +golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term 
v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1327,15 +2297,32 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
+golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -1349,6 +2336,7 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1379,6 +2367,7 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -1390,6 +2379,15 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1398,8 +2396,18 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1437,11 +2445,35 @@ google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/S google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= google.golang.org/api v0.81.0/go.mod h1:FA6Mb/bZxj706H2j+j2d6mHEEaHBmbbWnkfvmorOCko= -google.golang.org/api v0.228.0 h1:X2DJ/uoWGnY5obVjewbp8icSL5U4FzuCfy9OjbLSnLs= -google.golang.org/api v0.228.0/go.mod h1:wNvRS1Pbe8r4+IfBIniV8fwCpGwTrYa+kMUDiC5z5a4= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod 
h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= +google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= +google.golang.org/api v0.118.0/go.mod h1:76TtD3vkgmZ66zZzp72bUUklpmQmKlhh6sYtIjYK+5E= +google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms= +google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4= +google.golang.org/api v0.125.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= +google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= +google.golang.org/api v0.239.0 h1:2hZKUnFZEy81eugPs4e2XzIJ5SOwQg0G82bpXD65Puo= +google.golang.org/api v0.239.0/go.mod h1:cOVEm2TpdAGHL2z+UwyS+kmlGr3bVWQQ6sYEqkKje50= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -1449,7 +2481,6 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1491,6 +2522,7 @@ google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= @@ -1521,52 +2553,114 @@ google.golang.org/genproto 
v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2 google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20250204164813-702378808489 h1:nQcbCCOg2h2CQ0yA8SY3AHqriNKDvsetuq9mE/HFjtc= -google.golang.org/genproto v0.0.0-20250204164813-702378808489/go.mod h1:wkQ2Aj/xvshAUDtO/JHvu9y+AaN9cqs28QuSVSHtZSY= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod 
h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= +google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod 
h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= +google.golang.org/genproto v0.0.0-20230629202037-9506855d4529/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= +google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y= +google.golang.org/genproto v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:0ggbjUrZYpy1q+ANUS30SEoGZ53cdfwtbuG7Ptgy108= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78= +google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/api v0.0.0-20230629202037-9506855d4529/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:mPBs5jNgx2GuQGvFwUvVKqtn6HsUw9nP64BedgvqEsQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= +google.golang.org/genproto/googleapis/api v0.0.0-20241202173237-19429a94021a/go.mod h1:jehYqy3+AhJU9ve55aNOaSml7wUXjF9x6z2LcCfpAhY= +google.golang.org/genproto/googleapis/api 
v0.0.0-20250106144421-5f5ef82da422/go.mod h1:b6h1vNKhxaSoEI+5jc3PJUCustfli/mRab7295pY7rw= google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY= google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:8mL13HKkDa+IuJ8yruA3ci0q+0vsUz4m//+ottjwS5o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230731190214-cbb8c96f2d6d/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230803162519-f966b187b2e5/go.mod h1:zBEcrKX2ZOcEkHWxBPAIvYUWOKKMIhYcmNiUIu2ji3I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d/go.mod h1:3ENsm/5D1mzDyhpzeRi1NR784I0BcofWBoSc5QqqMK4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc 
v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= -google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= +google.golang.org/grpc v1.71.2 h1:KnzCueW4s+8ojAPZ+NnyZAELjsIMJGteKjKejieEC7M= +google.golang.org/grpc v1.71.2/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1582,6 +2676,17 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0/go.mod 
h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1625,6 +2730,7 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= k8s.io/api v0.33.1 h1:tA6Cf3bHnLIrUK4IqEgb2v++/GYUtqiu9sRVk3iBXyw= k8s.io/api v0.33.1/go.mod h1:87esjTn9DRSRTD4fWMXamiXxJhpOIREjWOSjsW1kEHw= k8s.io/apimachinery v0.33.1 h1:mzqXWV8tW9Rw4VeW9rEkqvnxj59k1ezDUl20tFK/oM4= @@ -1637,7 +2743,59 @@ k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUy k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.37.0/go.mod h1:vtL+3mdHx/wcj3iEGz84rQa8vEqR6XM84v5Lcvfph20= +modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0= +modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= +modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.0.0-20220904174949-82d86e1b6d56/go.mod h1:YSXjPL62P2AMSxBphRHPn7IkzhVHqkvOnRKAKh+W6ZI= +modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= +modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= +modernc.org/ccgo/v3 v3.16.13-0.20221017192402-261537637ce8/go.mod h1:fUB3Vn0nVPReA+7IG7yZDfjv1TMWjhQP8gCxrFAtL5g= +modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY= +modernc.org/ccorpus v1.11.6/go.mod 
h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= +modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= +modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= +modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= +modernc.org/libc v1.17.4/go.mod h1:WNg2ZH56rDEwdropAJeZPQkXmDwh+JCA1s/htl6r2fA= +modernc.org/libc v1.18.0/go.mod h1:vj6zehR5bfc98ipowQOM2nIDUZnVew/wNC/2tOGS+q0= +modernc.org/libc v1.20.3/go.mod h1:ZRfIaEkgrYgZDl6pa4W39HgN5G/yDW+NRmNKZBDFrk0= +modernc.org/libc v1.21.4/go.mod h1:przBsL5RDOZajTVslkugzLBj1evTue36jEomFQOoYuI= +modernc.org/libc v1.22.2/go.mod h1:uvQavJ1pZ0hIoC/jfqNoMLURIMhKzINIWypNM17puug= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/memory v1.3.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/memory v1.4.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= +modernc.org/sqlite v1.18.2/go.mod h1:kvrTLEWgxUcHa2GfHBQtanR1H9ht3hTJNtKpzH9k1u0= +modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= +modernc.org/tcl v1.13.2/go.mod h1:7CLiGIPo1M8Rv1Mitpv5akc2+8fxUd2y2UzC/MfMzy0= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= diff --git a/integration/api_endpoints_test.go b/integration/api_endpoints_test.go index 6da0971b49..5e5d683e06 100644 --- a/integration/api_endpoints_test.go +++ b/integration/api_endpoints_test.go @@ -9,12 +9,15 @@ import ( "net/http" "path/filepath" "testing" + "time" + 
"github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/thanos-io/thanos/pkg/runutil" "github.com/cortexproject/cortex/integration/e2e" + e2edb "github.com/cortexproject/cortex/integration/e2e/db" "github.com/cortexproject/cortex/integration/e2ecortex" ) @@ -85,3 +88,50 @@ func TestConfigAPIEndpoint(t *testing.T) { cortex2 := e2ecortex.NewSingleBinaryWithConfigFile("cortex-2", cortexConfigFile, configOverrides, "", 9009, 9095) require.NoError(t, s.StartAndWaitReady(cortex2)) } + +func Test_AllUserStats_WhenIngesterRollingUpdate(t *testing.T) { + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + flags := BlocksStorageFlags() + flags["-distributor.replication-factor"] = "3" + flags["-distributor.sharding-strategy"] = "shuffle-sharding" + flags["-distributor.ingestion-tenant-shard-size"] = "3" + flags["-distributor.shard-by-all-labels"] = "true" + + // Start dependencies. + consul := e2edb.NewConsul() + minio := e2edb.NewMinio(9000, flags["-blocks-storage.s3.bucket-name"]) + require.NoError(t, s.StartAndWaitReady(consul, minio)) + + // Start Cortex components. + distributor := e2ecortex.NewDistributor("distributor", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + ingester1 := e2ecortex.NewIngester("ingester-1", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + ingester2 := e2ecortex.NewIngester("ingester-2", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + ingester3 := e2ecortex.NewIngester("ingester-3", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + require.NoError(t, s.StartAndWaitReady(distributor, ingester1, ingester2, ingester3)) + + // Wait until distributor has updated the ring. + require.NoError(t, distributor.WaitSumMetricsWithOptions(e2e.Equals(3), []string{"cortex_ring_members"}, e2e.WithLabelMatchers( + labels.MustNewMatcher(labels.MatchEqual, "name", "ingester"), + labels.MustNewMatcher(labels.MatchEqual, "state", "ACTIVE")))) + + // stop ingester1 to emulate rolling update + require.NoError(t, s.Stop(ingester1)) + + client, err := e2ecortex.NewClient(distributor.HTTPEndpoint(), "", "", "", "user-1") + require.NoError(t, err) + + now := time.Now() + series, _ := generateSeries("series_1", now) + res, err := client.Push(series) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + + // QueriedIngesters is 2 since ingester1 has been stopped. 
+	userStats, err := client.AllUserStats()
+	require.NoError(t, err)
+	require.Len(t, userStats, 1)
+	require.Equal(t, uint64(2), userStats[0].QueriedIngesters)
+}
diff --git a/integration/e2e/composite_service.go b/integration/e2e/composite_service.go
index 0dd840db00..2ed6e1ef58 100644
--- a/integration/e2e/composite_service.go
+++ b/integration/e2e/composite_service.go
@@ -84,7 +84,7 @@ func (s *CompositeHTTPService) SumMetrics(metricNames []string, opts ...MetricsO
 			return nil, fmt.Errorf("unexpected mismatching sum metrics results (got %d, expected %d)", len(partials), len(sums))
 		}
 
-		for i := 0; i < len(sums); i++ {
+		for i := range sums {
 			sums[i] += partials[i]
 		}
 	}
diff --git a/integration/e2e/images/images.go b/integration/e2e/images/images.go
index 7b74452667..1ef0e8bbde 100644
--- a/integration/e2e/images/images.go
+++ b/integration/e2e/images/images.go
@@ -11,5 +11,5 @@ var (
 	Minio      = "minio/minio:RELEASE.2024-05-28T17-19-04Z"
 	Consul     = "consul:1.8.4"
 	ETCD       = "gcr.io/etcd-development/etcd:v3.4.7"
-	Prometheus = "quay.io/prometheus/prometheus:v3.3.1"
+	Prometheus = "quay.io/prometheus/prometheus:v3.5.0"
 )
diff --git a/integration/e2e/logger.go b/integration/e2e/logger.go
index 5152ed5e73..1a25c09ada 100644
--- a/integration/e2e/logger.go
+++ b/integration/e2e/logger.go
@@ -29,7 +29,7 @@ func NewLogger(w io.Writer) *Logger {
 	}
 }
 
-func (l *Logger) Log(keyvals ...interface{}) error {
+func (l *Logger) Log(keyvals ...any) error {
 	log := strings.Builder{}
 	log.WriteString(time.Now().Format("15:04:05"))
diff --git a/integration/e2e/metrics.go b/integration/e2e/metrics.go
index 988880e794..143b2fa73b 100644
--- a/integration/e2e/metrics.go
+++ b/integration/e2e/metrics.go
@@ -2,6 +2,7 @@ package e2e
 
 import (
 	"math"
+	"slices"
 
 	io_prometheus_client "github.com/prometheus/client_model/go"
 )
@@ -143,12 +144,7 @@ func EqualsAmong(values ...float64) func(sums ...float64) bool {
 		if len(sums) != 1 {
 			panic("equals among: expected one value")
 		}
-		for _, value := range values {
-			if sums[0] == value {
-				return true
-			}
-		}
-		return false
+		return slices.Contains(values, sums[0])
 	}
 }
diff --git a/integration/e2e/scenario.go b/integration/e2e/scenario.go
index 19938fedcd..3535e77deb 100644
--- a/integration/e2e/scenario.go
+++ b/integration/e2e/scenario.go
@@ -163,7 +163,7 @@ func (s *Scenario) shutdown() {
 		"--filter", fmt.Sprintf("network=%s", s.networkName),
 	); err == nil {
-		for _, containerID := range strings.Split(string(out), "\n") {
+		for containerID := range strings.SplitSeq(string(out), "\n") {
 			containerID = strings.TrimSpace(containerID)
 			if containerID == "" {
 				continue
diff --git a/integration/e2e/service.go b/integration/e2e/service.go
index bc99429e1b..c3fb7ad0fb 100644
--- a/integration/e2e/service.go
+++ b/integration/e2e/service.go
@@ -503,7 +503,7 @@ type LinePrefixLogger struct {
 }
 
 func (w *LinePrefixLogger) Write(p []byte) (n int, err error) {
-	for _, line := range strings.Split(string(p), "\n") {
+	for line := range strings.SplitSeq(string(p), "\n") {
 		// Skip empty lines
 		line = strings.TrimSpace(line)
 		if line == "" {
@@ -698,7 +698,7 @@ func (s *HTTPService) WaitRemovedMetric(metricName string, opts ...MetricsOption
 func parseDockerIPv4Port(out string) (int, error) {
 	// The "docker port" output may be multiple lines if both IPv4 and IPv6 are supported,
 	// so we need to parse each line.
- for _, line := range strings.Split(out, "\n") { + for line := range strings.SplitSeq(out, "\n") { matches := dockerIPv4PortPattern.FindStringSubmatch(strings.TrimSpace(line)) if len(matches) != 2 { continue diff --git a/integration/e2e/util.go b/integration/e2e/util.go index dd10efa1ba..c7af414157 100644 --- a/integration/e2e/util.go +++ b/integration/e2e/util.go @@ -2,6 +2,7 @@ package e2e import ( "context" + "maps" "math" "math/rand" "net/http" @@ -19,6 +20,7 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" + writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/tsdbutil" @@ -62,9 +64,7 @@ func MergeFlagsWithoutRemovingEmpty(inputs ...map[string]string) map[string]stri output := map[string]string{} for _, input := range inputs { - for name, value := range input { - output[name] = value - } + maps.Copy(output, input) } return output @@ -210,7 +210,7 @@ func GenerateSeriesWithSamples( startTMillis := tsMillis samples := make([]prompb.Sample, numSamples) - for i := 0; i < numSamples; i++ { + for i := range numSamples { scrapeJitter := rand.Int63n(10) + 1 // add a jitter to simulate real-world scenarios, refer to: https://github.com/prometheus/prometheus/issues/13213 samples[i] = prompb.Sample{ Timestamp: startTMillis + scrapeJitter, @@ -287,11 +287,11 @@ func CreateNHBlock( }() app := h.Appender(ctx) - for i := 0; i < len(series); i++ { + for i := range series { num := random.Intn(i + 1) var ref storage.SeriesRef start := RandRange(rnd, mint, maxt) - for j := 0; j < numNHSamples; j++ { + for j := range numNHSamples { if num%2 == 0 { // append float histogram ref, err = app.AppendHistogram(ref, series[i], start, nil, tsdbutil.GenerateTestFloatHistogram(int64(i+j))) @@ -371,11 +371,11 @@ func CreateBlock( }() app := h.Appender(ctx) - for i := 0; i < len(series); i++ { + for i := range series { var ref storage.SeriesRef start := RandRange(rnd, mint, maxt) - for j := 0; j < numSamples; j++ { + for j := range numSamples { ref, err = app.Append(ref, series[i], start, float64(i+j)) if err != nil { if rerr := app.Rollback(); rerr != nil { @@ -423,3 +423,117 @@ func CreateBlock( return id, nil } + +func GenerateHistogramSeriesV2(name string, ts time.Time, i uint32, floatHistogram bool, additionalLabels ...prompb.Label) (symbols []string, series []writev2.TimeSeries) { + tsMillis := TimeToMilliseconds(ts) + + st := writev2.NewSymbolTable() + lb := labels.NewScratchBuilder(0) + lb.Add("__name__", name) + for _, lbl := range additionalLabels { + lb.Add(lbl.Name, lbl.Value) + } + + var ( + h *histogram.Histogram + fh *histogram.FloatHistogram + ph writev2.Histogram + ) + if floatHistogram { + fh = tsdbutil.GenerateTestFloatHistogram(int64(i)) + ph = writev2.FromFloatHistogram(tsMillis, fh) + } else { + h = tsdbutil.GenerateTestHistogram(int64(i)) + ph = writev2.FromIntHistogram(tsMillis, h) + } + + // Generate the series + series = append(series, writev2.TimeSeries{ + LabelsRefs: st.SymbolizeLabels(lb.Labels(), nil), + Histograms: []writev2.Histogram{ph}, + }) + + symbols = st.Symbols() + + return +} + +func GenerateSeriesV2(name string, ts time.Time, additionalLabels ...prompb.Label) (symbols []string, series []writev2.TimeSeries, vector model.Vector) { + tsMillis := TimeToMilliseconds(ts) + value := rand.Float64() + + st := writev2.NewSymbolTable() + lb := 
labels.NewScratchBuilder(0) + lb.Add("__name__", name) + + for _, label := range additionalLabels { + lb.Add(label.Name, label.Value) + } + // Generate the series + series = append(series, writev2.TimeSeries{ + LabelsRefs: st.SymbolizeLabels(lb.Labels(), nil), + Samples: []writev2.Sample{ + {Value: value, Timestamp: tsMillis}, + }, + Metadata: writev2.Metadata{ + Type: writev2.Metadata_METRIC_TYPE_GAUGE, + }, + }) + symbols = st.Symbols() + + // Generate the expected vector when querying it + metric := model.Metric{} + metric[labels.MetricName] = model.LabelValue(name) + for _, lbl := range additionalLabels { + metric[model.LabelName(lbl.Name)] = model.LabelValue(lbl.Value) + } + + vector = append(vector, &model.Sample{ + Metric: metric, + Value: model.SampleValue(value), + Timestamp: model.Time(tsMillis), + }) + + return +} + +func GenerateV2SeriesWithSamples( + name string, + startTime time.Time, + scrapeInterval time.Duration, + startValue int, + numSamples int, + additionalLabels ...prompb.Label, +) (symbols []string, series writev2.TimeSeries) { + tsMillis := TimeToMilliseconds(startTime) + durMillis := scrapeInterval.Milliseconds() + + st := writev2.NewSymbolTable() + lb := labels.NewScratchBuilder(0) + lb.Add("__name__", name) + + for _, label := range additionalLabels { + lb.Add(label.Name, label.Value) + } + + startTMillis := tsMillis + samples := make([]writev2.Sample, numSamples) + for i := range numSamples { + scrapeJitter := rand.Int63n(10) + 1 // add a jitter to simulate real-world scenarios, refer to: https://github.com/prometheus/prometheus/issues/13213 + samples[i] = writev2.Sample{ + Timestamp: startTMillis + scrapeJitter, + Value: float64(i + startValue), + } + startTMillis += durMillis + } + + series = writev2.TimeSeries{ + LabelsRefs: st.SymbolizeLabels(lb.Labels(), nil), + Samples: samples, + Metadata: writev2.Metadata{ + Type: writev2.Metadata_METRIC_TYPE_GAUGE, + }, + } + + return st.Symbols(), series +} diff --git a/integration/e2ecortex/client.go b/integration/e2ecortex/client.go index 9067b60c07..fa707e7f49 100644 --- a/integration/e2ecortex/client.go +++ b/integration/e2ecortex/client.go @@ -24,6 +24,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/rulefmt" "github.com/prometheus/prometheus/prompb" + writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage/remote" yaml "gopkg.in/yaml.v3" @@ -32,6 +33,7 @@ import ( "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" + "github.com/cortexproject/cortex/pkg/ingester" "github.com/cortexproject/cortex/pkg/ruler" "github.com/cortexproject/cortex/pkg/util/backoff" ) @@ -114,6 +116,40 @@ func NewPromQueryClient(address string) (*Client, error) { return c, nil } +func (c *Client) AllUserStats() ([]ingester.UserIDStats, error) { + req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("http://%s/distributor/all_user_stats", c.distributorAddress), nil) + if err != nil { + return nil, err + } + req.Header.Set("Accept", "application/json") + + ctx, cancel := context.WithTimeout(context.Background(), c.timeout) + defer cancel() + + // Execute HTTP request + res, err := c.httpClient.Do(req.WithContext(ctx)) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + return nil, fmt.Errorf("unexpected status code %d from all_user_stats endpoint", res.StatusCode) + } + + bodyBytes, err := io.ReadAll(res.Body) + if err != nil { + return nil, err + } + + 
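// The endpoint returns a JSON array with one entry per tenant; decode it directly. + 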
userStats := make([]ingester.UserIDStats, 0) + err = json.Unmarshal(bodyBytes, &userStats) + if err != nil { + return nil, err + } + + return userStats, nil +} + // Push the input timeseries to the remote endpoint func (c *Client) Push(timeseries []prompb.TimeSeries, metadata ...prompb.MetricMetadata) (*http.Response, error) { // Create write request @@ -147,6 +183,39 @@ func (c *Client) Push(timeseries []prompb.TimeSeries, metadata ...prompb.MetricM return res, nil } +// PushV2 the input timeseries to the remote endpoint +func (c *Client) PushV2(symbols []string, timeseries []writev2.TimeSeries) (*http.Response, error) { + // Create write request + data, err := proto.Marshal(&writev2.Request{Symbols: symbols, Timeseries: timeseries}) + if err != nil { + return nil, err + } + + // Create HTTP request + compressed := snappy.Encode(nil, data) + req, err := http.NewRequest("POST", fmt.Sprintf("http://%s/api/prom/push", c.distributorAddress), bytes.NewReader(compressed)) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Encoding", "snappy") + req.Header.Set("Content-Type", "application/x-protobuf;proto=io.prometheus.write.v2.Request") + req.Header.Set("X-Prometheus-Remote-Write-Version", "2.0.0") + req.Header.Set("X-Scope-OrgID", c.orgID) + + ctx, cancel := context.WithTimeout(context.Background(), c.timeout) + defer cancel() + + // Execute HTTP request + res, err := c.httpClient.Do(req.WithContext(ctx)) + if err != nil { + return nil, err + } + + defer res.Body.Close() + return res, nil +} + func getNameAndAttributes(ts prompb.TimeSeries) (string, map[string]any) { var metricName string attributes := make(map[string]any) @@ -236,7 +305,7 @@ func convertTimeseriesToMetrics(timeseries []prompb.TimeSeries, metadata []promp return metrics } -func otlpWriteRequest(name string, labels ...prompb.Label) pmetricotlp.ExportRequest { +func otlpWriteRequest(name, unit string, temporality pmetric.AggregationTemporality, labels ...prompb.Label) pmetricotlp.ExportRequest { d := pmetric.NewMetrics() // Generate One Counter, One Gauge, One Histogram, One Exponential-Histogram @@ -258,10 +327,11 @@ func otlpWriteRequest(name string, labels ...prompb.Label) pmetricotlp.ExportReq // Generate One Counter counterMetric := scopeMetric.Metrics().AppendEmpty() counterMetric.SetName(name) + counterMetric.SetUnit(unit) counterMetric.SetDescription("test-counter-description") counterMetric.SetEmptySum() - counterMetric.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + counterMetric.Sum().SetAggregationTemporality(temporality) counterDataPoint := counterMetric.Sum().DataPoints().AppendEmpty() counterDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp)) @@ -276,8 +346,8 @@ func otlpWriteRequest(name string, labels ...prompb.Label) pmetricotlp.ExportReq return pmetricotlp.NewExportRequestFromMetrics(d) } -func (c *Client) OTLPPushExemplar(name string, labels ...prompb.Label) (*http.Response, error) { - data, err := otlpWriteRequest(name, labels...).MarshalProto() +func (c *Client) OTLPPushExemplar(name, unit string, temporality pmetric.AggregationTemporality, labels ...prompb.Label) (*http.Response, error) { + data, err := otlpWriteRequest(name, unit, temporality, labels...).MarshalProto() if err != nil { return nil, err } diff --git a/integration/otlp_test.go b/integration/otlp_test.go index 7eda34e55e..fe83c1852f 100644 --- a/integration/otlp_test.go +++ b/integration/otlp_test.go @@ -18,6 +18,7 @@ import ( "github.com/stretchr/testify/assert" 
"github.com/stretchr/testify/require" "github.com/thanos-io/objstore/providers/s3" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/cortexproject/cortex/integration/e2e" e2edb "github.com/cortexproject/cortex/integration/e2e/db" @@ -149,7 +150,7 @@ func TestOTLPIngestExemplar(t *testing.T) { c, err := e2ecortex.NewClient(cortex.HTTPEndpoint(), cortex.HTTPEndpoint(), "", "", "user-1") require.NoError(t, err) - res, err := c.OTLPPushExemplar("exemplar_1") + res, err := c.OTLPPushExemplar("exemplar_1", "", pmetric.AggregationTemporalityCumulative) require.NoError(t, err) require.Equal(t, 200, res.StatusCode) @@ -241,15 +242,15 @@ func TestOTLPPromoteResourceAttributesPerTenant(t *testing.T) { {Name: "attr3", Value: "value"}, } - res, err := c1.OTLPPushExemplar("series_1", labels...) + res, err := c1.OTLPPushExemplar("series_1", "", pmetric.AggregationTemporalityCumulative, labels...) require.NoError(t, err) require.Equal(t, 200, res.StatusCode) - res, err = c2.OTLPPushExemplar("series_1", labels...) + res, err = c2.OTLPPushExemplar("series_1", "", pmetric.AggregationTemporalityCumulative, labels...) require.NoError(t, err) require.Equal(t, 200, res.StatusCode) - res, err = c3.OTLPPushExemplar("series_1", labels...) + res, err = c3.OTLPPushExemplar("series_1", "", pmetric.AggregationTemporalityCumulative, labels...) require.NoError(t, err) require.Equal(t, 200, res.StatusCode) @@ -265,3 +266,116 @@ func TestOTLPPromoteResourceAttributesPerTenant(t *testing.T) { require.NoError(t, err) require.Equal(t, labelSet3, []string{"__name__", "attr1", "attr2", "attr3", "instance", "job"}) } + +func TestOTLPEnableTypeAndUnitLabels(t *testing.T) { + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Start dependencies. + minio := e2edb.NewMinio(9000, bucketName) + require.NoError(t, s.StartAndWaitReady(minio)) + + // Configure the blocks storage to frequently compact TSDB head + // and ship blocks to the storage. + flags := mergeFlags(BlocksStorageFlags(), map[string]string{ + "-auth.enabled": "true", + + // OTLP + "-distributor.otlp.enable-type-and-unit-labels": "true", + + // alert manager + "-alertmanager.web.external-url": "http://localhost/alertmanager", + "-alertmanager-storage.backend": "local", + "-alertmanager-storage.local.path": filepath.Join(e2e.ContainerSharedDir, "alertmanager_configs"), + }) + + // make alert manager config dir + require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs", []byte{})) + + require.NoError(t, copyFileToSharedDir(s, "docs/configuration/single-process-config-blocks-local.yaml", cortexConfigFile)) + + // start cortex and assert runtime-config is loaded correctly + cortex := e2ecortex.NewSingleBinaryWithConfigFile("cortex", cortexConfigFile, flags, "", 9009, 9095) + require.NoError(t, s.StartAndWaitReady(cortex)) + + c, err := e2ecortex.NewClient(cortex.HTTPEndpoint(), cortex.HTTPEndpoint(), "", "", "user-1") + require.NoError(t, err) + + // Push some series to Cortex. + now := time.Now() + + labels := []prompb.Label{ + {Name: "service.name", Value: "test-service"}, + {Name: "attr1", Value: "value"}, + } + + res, err := c.OTLPPushExemplar("series_1", "seconds", pmetric.AggregationTemporalityCumulative, labels...) 
+ require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + + value, err := c.Query("series_1_seconds", now) + require.NoError(t, err) + vector, ok := value.(model.Vector) + require.True(t, ok) + require.Equal(t, 1, len(vector)) + + metric := vector[0].Metric + require.Equal(t, model.LabelValue("seconds"), metric["__unit__"]) + require.Equal(t, model.LabelValue("gauge"), metric["__type__"]) +} + +func TestOTLPPushDeltaTemporality(t *testing.T) { + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Start dependencies. + minio := e2edb.NewMinio(9000, bucketName) + require.NoError(t, s.StartAndWaitReady(minio)) + + // Enable auth and allow OTLP delta temporality on top of + // the default blocks storage flags. + flags := mergeFlags(BlocksStorageFlags(), map[string]string{ + "-auth.enabled": "true", + + // OTLP + "-distributor.otlp.allow-delta-temporality": "true", + + // alert manager + "-alertmanager.web.external-url": "http://localhost/alertmanager", + "-alertmanager-storage.backend": "local", + "-alertmanager-storage.local.path": filepath.Join(e2e.ContainerSharedDir, "alertmanager_configs"), + }) + + // make alert manager config dir + require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs", []byte{})) + + require.NoError(t, copyFileToSharedDir(s, "docs/configuration/single-process-config-blocks-local.yaml", cortexConfigFile)) + + // Start Cortex as a single binary. + cortex := e2ecortex.NewSingleBinaryWithConfigFile("cortex", cortexConfigFile, flags, "", 9009, 9095) + require.NoError(t, s.StartAndWaitReady(cortex)) + + c, err := e2ecortex.NewClient(cortex.HTTPEndpoint(), cortex.HTTPEndpoint(), "", "", "user-1") + require.NoError(t, err) + + // Push some series to Cortex. + now := time.Now() + + labels := []prompb.Label{ + {Name: "service.name", Value: "test-service"}, + {Name: "attr1", Value: "value"}, + } + + res, err := c.OTLPPushExemplar("series_1", "", pmetric.AggregationTemporalityDelta, labels...) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + + value, err := c.Query("series_1", now) + require.NoError(t, err) + vector, ok := value.(model.Vector) + require.True(t, ok) + require.Equal(t, 1, len(vector)) +} diff --git a/integration/parquet_querier_test.go b/integration/parquet_querier_test.go index ca31a019c9..e085cef99d 100644 --- a/integration/parquet_querier_test.go +++ b/integration/parquet_querier_test.go @@ -63,8 +63,9 @@ func TestParquetFuzz(t *testing.T) { "-store-gateway.sharding-enabled": "false", "--querier.store-gateway-addresses": "nonExistent", // Make sure we do not call Store gateways // alert manager - "-alertmanager.web.external-url": "http://localhost/alertmanager", - "-frontend.query-vertical-shard-size": "1", + "-alertmanager.web.external-url": "http://localhost/alertmanager", + // Enable vertical sharding. 
+ "-frontend.query-vertical-shard-size": "3", "-frontend.max-cache-freshness": "1m", // enable experimental promQL funcs "-querier.enable-promql-experimental-functions": "true", @@ -98,19 +99,8 @@ func TestParquetFuzz(t *testing.T) { end := now.Add(-time.Hour) for i := 0; i < numSeries; i++ { - lbls = append(lbls, labels.Labels{ - {Name: labels.MetricName, Value: "test_series_a"}, - {Name: "job", Value: "test"}, - {Name: "series", Value: strconv.Itoa(i % 3)}, - {Name: "status_code", Value: statusCodes[i%5]}, - }) - - lbls = append(lbls, labels.Labels{ - {Name: labels.MetricName, Value: "test_series_b"}, - {Name: "job", Value: "test"}, - {Name: "series", Value: strconv.Itoa((i + 1) % 3)}, - {Name: "status_code", Value: statusCodes[(i+1)%5]}, - }) + lbls = append(lbls, labels.FromStrings(labels.MetricName, "test_series_a", "job", "test", "series", strconv.Itoa(i%3), "status_code", statusCodes[i%5])) + lbls = append(lbls, labels.FromStrings(labels.MetricName, "test_series_b", "job", "test", "series", strconv.Itoa((i+1)%3), "status_code", statusCodes[(i+1)%5])) } id, err := e2e.CreateBlock(ctx, rnd, dir, lbls, numSamples, start.UnixMilli(), end.UnixMilli(), scrapeInterval.Milliseconds(), 10) require.NoError(t, err) @@ -130,16 +120,20 @@ func TestParquetFuzz(t *testing.T) { // Wait until we convert the blocks cortex_testutil.Poll(t, 30*time.Second, true, func() interface{} { found := false + foundBucketIndex := false err := bkt.Iter(context.Background(), "", func(name string) error { fmt.Println(name) if name == fmt.Sprintf("parquet-markers/%v-parquet-converter-mark.json", id.String()) { found = true } + if name == "bucket-index.json.gz" { + foundBucketIndex = true + } return nil }, objstore.WithRecursiveIter()) require.NoError(t, err) - return found + return found && foundBucketIndex }) att, err := bkt.Attributes(context.Background(), "bucket-index.json.gz") @@ -178,7 +172,7 @@ func TestParquetFuzz(t *testing.T) { } ps := promqlsmith.New(rnd, lbls, opts...) - runQueryFuzzTestCases(t, ps, c1, c2, end, start, end, scrapeInterval, 500, false) + runQueryFuzzTestCases(t, ps, c1, c2, end, start, end, scrapeInterval, 1000, false) require.NoError(t, cortex.WaitSumMetricsWithOptions(e2e.Greater(0), []string{"cortex_parquet_queryable_blocks_queried_total"}, e2e.WithLabelMatchers( labels.MustNewMatcher(labels.MatchEqual, "type", "parquet")))) diff --git a/integration/parse_query_api_test.go b/integration/parse_query_api_test.go new file mode 100644 index 0000000000..06db800a92 --- /dev/null +++ b/integration/parse_query_api_test.go @@ -0,0 +1,135 @@ +//go:build requires_docker +// +build requires_docker + +package integration + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cortexproject/cortex/integration/e2e" + e2edb "github.com/cortexproject/cortex/integration/e2e/db" + "github.com/cortexproject/cortex/integration/e2ecortex" +) + +func TestParseQueryAPIQuerier(t *testing.T) { + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Start dependencies. 
+ consul := e2edb.NewConsul() + minio := e2edb.NewMinio(9000, bucketName) + require.NoError(t, s.StartAndWaitReady(consul, minio)) + + flags := mergeFlags(BlocksStorageFlags(), map[string]string{ + "-auth.enabled": "true", + }) + + distributor := e2ecortex.NewDistributor("distributor", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + ingester := e2ecortex.NewIngester("ingester", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + require.NoError(t, s.StartAndWaitReady(distributor, ingester)) + + // Wait until the distributor has updated the ring. + require.NoError(t, distributor.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total")) + + querier := e2ecortex.NewQuerier("querier", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + require.NoError(t, s.StartAndWaitReady(querier)) + + // Wait until the querier has updated the ring. + require.NoError(t, querier.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total")) + + endpoint := fmt.Sprintf("http://%s/api/prom/api/v1/parse_query?query=foo/bar", querier.HTTPEndpoint()) + + req, err := http.NewRequest("GET", endpoint, nil) + require.NoError(t, err) + req.Header.Set("X-Scope-OrgID", "user-1") + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + + require.Equal(t, http.StatusOK, resp.StatusCode) + + var parsed struct { + Status string `json:"status"` + Data json.RawMessage `json:"data"` + } + require.NoError(t, json.Unmarshal(body, &parsed)) + require.Equal(t, "success", parsed.Status) + + // check for AST contents. + require.Contains(t, string(parsed.Data), "\"op\":\"/\"") + require.Contains(t, string(parsed.Data), `"lhs":{"matchers":[{"name":"__name__","type":"=","value":"foo"}]`) + require.Contains(t, string(parsed.Data), `"rhs":{"matchers":[{"name":"__name__","type":"=","value":"bar"}]`) +} + +func TestParseQueryAPIQueryFrontend(t *testing.T) { + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Start dependencies. + consul := e2edb.NewConsul() + minio := e2edb.NewMinio(9000, bucketName) + require.NoError(t, s.StartAndWaitReady(consul, minio)) + + flags := mergeFlags(BlocksStorageFlags(), map[string]string{ + "-auth.enabled": "true", + }) + + // Start the query-frontend. + queryFrontend := e2ecortex.NewQueryFrontend("query-frontend", flags, "") + require.NoError(t, s.Start(queryFrontend)) + + distributor := e2ecortex.NewDistributor("distributor", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + ingester := e2ecortex.NewIngester("ingester", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + require.NoError(t, s.StartAndWaitReady(distributor, ingester)) + + // Wait until the distributor has updated the ring. 
+ require.NoError(t, distributor.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total")) + + querier := e2ecortex.NewQuerier("querierWithFrontend", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), mergeFlags(flags, map[string]string{ + "-querier.frontend-address": queryFrontend.NetworkGRPCEndpoint(), + }), "") + + require.NoError(t, s.StartAndWaitReady(querier)) + require.NoError(t, s.WaitReady(queryFrontend)) + + require.NoError(t, querier.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total")) + + endpoint := fmt.Sprintf("http://%s/api/prom/api/v1/parse_query?query=foo/bar", queryFrontend.HTTPEndpoint()) + + req, err := http.NewRequest("GET", endpoint, nil) + require.NoError(t, err) + req.Header.Set("X-Scope-OrgID", "user-1") + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + + require.Equal(t, http.StatusOK, resp.StatusCode) + + var parsed struct { + Status string `json:"status"` + Data json.RawMessage `json:"data"` + } + require.NoError(t, json.Unmarshal(body, &parsed)) + require.Equal(t, "success", parsed.Status) + + // check for AST contents. + require.Contains(t, string(parsed.Data), "\"op\":\"/\"") + require.Contains(t, string(parsed.Data), `"lhs":{"matchers":[{"name":"__name__","type":"=","value":"foo"}]`) + require.Contains(t, string(parsed.Data), `"rhs":{"matchers":[{"name":"__name__","type":"=","value":"bar"}]`) +} diff --git a/integration/querier_test.go b/integration/querier_test.go index 7e16b587db..27929ba5d8 100644 --- a/integration/querier_test.go +++ b/integration/querier_test.go @@ -1375,3 +1375,78 @@ func TestQuerierEngineConfigs(t *testing.T) { } } + +func TestQuerierDistributedExecution(t *testing.T) { + // e2e test setup + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // initialize the flags + flags := mergeFlags( + BlocksStorageFlags(), + map[string]string{ + "-blocks-storage.tsdb.block-ranges-period": (5 * time.Second).String(), + "-blocks-storage.tsdb.ship-interval": "1s", + "-blocks-storage.tsdb.retention-period": ((5 * time.Second * 2) - 1).String(), + "-querier.thanos-engine": "true", + // enable distributed execution (logical plan execution) + "-querier.distributed-exec-enabled": "true", + }, + ) + + minio := e2edb.NewMinio(9000, flags["-blocks-storage.s3.bucket-name"]) + consul := e2edb.NewConsul() + require.NoError(t, s.StartAndWaitReady(consul, minio)) + + // start services + distributor := e2ecortex.NewDistributor("distributor", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + ingester := e2ecortex.NewIngester("ingester", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + queryScheduler := e2ecortex.NewQueryScheduler("query-scheduler", flags, "") + storeGateway := e2ecortex.NewStoreGateway("store-gateway", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + require.NoError(t, s.StartAndWaitReady(queryScheduler, distributor, ingester, storeGateway)) + flags = mergeFlags(flags, map[string]string{ + "-querier.store-gateway-addresses": strings.Join([]string{storeGateway.NetworkGRPCEndpoint()}, ","), + }) + + queryFrontend := e2ecortex.NewQueryFrontend("query-frontend", mergeFlags(flags, map[string]string{ + "-frontend.scheduler-address": queryScheduler.NetworkGRPCEndpoint(), + }), "") + require.NoError(t, s.Start(queryFrontend)) + + querier := e2ecortex.NewQuerier("querier", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), 
mergeFlags(flags, map[string]string{ + "-querier.scheduler-address": queryScheduler.NetworkGRPCEndpoint(), + }), "") + require.NoError(t, s.StartAndWaitReady(querier)) + + // Wait until the distributor and querier have updated the ring. + require.NoError(t, distributor.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total")) + require.NoError(t, querier.WaitSumMetrics(e2e.Equals(2*512), "cortex_ring_tokens_total")) + + c, err := e2ecortex.NewClient(distributor.HTTPEndpoint(), queryFrontend.HTTPEndpoint(), "", "", "user-1") + require.NoError(t, err) + + series1Timestamp := time.Now() + series2Timestamp := series1Timestamp.Add(time.Minute * 1) + series1, expectedVector1 := generateSeries("series_1", series1Timestamp, prompb.Label{Name: "series_1", Value: "series_1"}) + series2, expectedVector2 := generateSeries("series_2", series2Timestamp, prompb.Label{Name: "series_2", Value: "series_2"}) + + res, err := c.Push(series1) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + + res, err = c.Push(series2) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + + // main tests + // - make sure queries are still executable with distributed execution enabled + var val model.Value + val, err = c.Query("series_1", series1Timestamp) + require.NoError(t, err) + require.Equal(t, expectedVector1, val.(model.Vector)) + + val, err = c.Query("series_2", series2Timestamp) + require.NoError(t, err) + require.Equal(t, expectedVector2, val.(model.Vector)) +} diff --git a/integration/query_frontend_test.go b/integration/query_frontend_test.go index 6d7b0651d7..7a68b64a69 100644 --- a/integration/query_frontend_test.go +++ b/integration/query_frontend_test.go @@ -38,7 +38,6 @@ import ( type queryFrontendTestConfig struct { testMissingMetricName bool querySchedulerEnabled bool - queryStatsEnabled bool remoteReadEnabled bool testSubQueryStepSize bool setup func(t *testing.T, s *e2e.Scenario) (configFile string, flags map[string]string) @@ -61,7 +60,6 @@ func TestQueryFrontendWithBlocksStorageViaFlags(t *testing.T) { func TestQueryFrontendWithBlocksStorageViaFlagsAndQueryStatsEnabled(t *testing.T) { runQueryFrontendTest(t, queryFrontendTestConfig{ testMissingMetricName: false, - queryStatsEnabled: true, setup: func(t *testing.T, s *e2e.Scenario) (configFile string, flags map[string]string) { flags = BlocksStorageFlags() @@ -92,7 +90,6 @@ func TestQueryFrontendWithBlocksStorageViaFlagsAndWithQuerySchedulerAndQueryStat runQueryFrontendTest(t, queryFrontendTestConfig{ testMissingMetricName: false, querySchedulerEnabled: true, - queryStatsEnabled: true, setup: func(t *testing.T, s *e2e.Scenario) (configFile string, flags map[string]string) { flags = BlocksStorageFlags() @@ -168,7 +165,6 @@ func TestQueryFrontendWithVerticalSharding(t *testing.T) { runQueryFrontendTest(t, queryFrontendTestConfig{ testMissingMetricName: false, querySchedulerEnabled: false, - queryStatsEnabled: true, setup: func(t *testing.T, s *e2e.Scenario) (configFile string, flags map[string]string) { require.NoError(t, writeFileToSharedDir(s, cortexConfigFile, []byte(BlocksStorageConfig))) @@ -188,7 +184,6 @@ func TestQueryFrontendWithVerticalShardingQueryScheduler(t *testing.T) { runQueryFrontendTest(t, queryFrontendTestConfig{ testMissingMetricName: false, querySchedulerEnabled: true, - queryStatsEnabled: true, setup: func(t *testing.T, s *e2e.Scenario) (configFile string, flags map[string]string) { require.NoError(t, writeFileToSharedDir(s, cortexConfigFile, []byte(BlocksStorageConfig))) @@ -208,7 +203,6 @@ func 
TestQueryFrontendProtobufCodec(t *testing.T) { runQueryFrontendTest(t, queryFrontendTestConfig{ testMissingMetricName: false, querySchedulerEnabled: true, - queryStatsEnabled: true, setup: func(t *testing.T, s *e2e.Scenario) (configFile string, flags map[string]string) { require.NoError(t, writeFileToSharedDir(s, cortexConfigFile, []byte(BlocksStorageConfig))) @@ -216,14 +210,33 @@ func TestQueryFrontendProtobufCodec(t *testing.T) { require.NoError(t, s.StartAndWaitReady(minio)) flags = mergeFlags(e2e.EmptyFlags(), map[string]string{ - "-api.querier-default-codec": "protobuf", - "-querier.response-compression": "gzip", + "-api.querier-default-codec": "protobuf", }) return cortexConfigFile, flags }, }) } +func TestQuerierToQueryFrontendCompression(t *testing.T) { + for _, compression := range []string{"gzip", "zstd", "snappy", ""} { + runQueryFrontendTest(t, queryFrontendTestConfig{ + testMissingMetricName: false, + querySchedulerEnabled: true, + setup: func(t *testing.T, s *e2e.Scenario) (configFile string, flags map[string]string) { + require.NoError(t, writeFileToSharedDir(s, cortexConfigFile, []byte(BlocksStorageConfig))) + + minio := e2edb.NewMinio(9000, BlocksStorageFlags()["-blocks-storage.s3.bucket-name"]) + require.NoError(t, s.StartAndWaitReady(minio)) + + flags = mergeFlags(e2e.EmptyFlags(), map[string]string{ + "-querier.response-compression": compression, + }) + return cortexConfigFile, flags + }, + }) + } +} + func TestQueryFrontendRemoteRead(t *testing.T) { runQueryFrontendTest(t, queryFrontendTestConfig{ remoteReadEnabled: true, @@ -274,7 +287,7 @@ func runQueryFrontendTest(t *testing.T, cfg queryFrontendTestConfig) { "-querier.split-queries-by-interval": "24h", "-querier.query-ingesters-within": "12h", // Required by the test on query /series out of ingesters time range "-frontend.memcached.addresses": "dns+" + memcached.NetworkEndpoint(e2ecache.MemcachedPort), - "-frontend.query-stats-enabled": strconv.FormatBool(cfg.queryStatsEnabled), + "-frontend.query-stats-enabled": "true", // Always enable query stats to capture regressions }) // Start the query-scheduler if enabled. @@ -362,7 +375,7 @@ func runQueryFrontendTest(t *testing.T, cfg queryFrontendTestConfig) { } // No need to repeat the test on Server-Timing header for each user. - if userID == 0 && cfg.queryStatsEnabled { + if userID == 0 { res, _, err := c.QueryRaw("{instance=~\"hello.*\"}", time.Now(), map[string]string{}) require.NoError(t, err) require.Regexp(t, "querier_wall_time;dur=[0-9.]*, response_time;dur=[0-9.]*$", res.Header.Values("Server-Timing")[0]) @@ -413,15 +426,11 @@ func runQueryFrontendTest(t *testing.T, cfg queryFrontendTestConfig) { wg.Wait() - extra := float64(2) + extra := float64(3) // Always include query stats test if cfg.testMissingMetricName { extra++ } - if cfg.queryStatsEnabled { - extra++ - } - if cfg.remoteReadEnabled { extra++ } @@ -438,15 +447,11 @@ func runQueryFrontendTest(t *testing.T, cfg queryFrontendTestConfig) { require.NoError(t, querier.WaitSumMetricsWithOptions(e2e.Greater(numUsers*numQueriesPerUser), []string{"cortex_request_duration_seconds"}, e2e.WithMetricCount)) require.NoError(t, querier.WaitSumMetricsWithOptions(e2e.Greater(numUsers*numQueriesPerUser), []string{"cortex_querier_request_duration_seconds"}, e2e.WithMetricCount)) - // Ensure query stats metrics are tracked only when enabled. 
- if cfg.queryStatsEnabled { - require.NoError(t, queryFrontend.WaitSumMetricsWithOptions( - e2e.Greater(0), - []string{"cortex_query_seconds_total"}, - e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "user", "user-1")))) - } else { - require.NoError(t, queryFrontend.WaitRemovedMetric("cortex_query_seconds_total")) - } + // Ensure query stats metrics are always tracked to capture regressions. + require.NoError(t, queryFrontend.WaitSumMetricsWithOptions( + e2e.Greater(0), + []string{"cortex_query_seconds_total"}, + e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "user", "user-1")))) // Ensure no service-specific metrics prefix is used by the wrong service. assertServiceMetricsPrefixes(t, Distributor, distributor) diff --git a/integration/query_fuzz_test.go b/integration/query_fuzz_test.go index d4c501737e..b12560be6f 100644 --- a/integration/query_fuzz_test.go +++ b/integration/query_fuzz_test.go @@ -108,19 +108,8 @@ func TestNativeHistogramFuzz(t *testing.T) { scrapeInterval := time.Minute statusCodes := []string{"200", "400", "404", "500", "502"} for i := 0; i < numSeries; i++ { - lbls = append(lbls, labels.Labels{ - {Name: labels.MetricName, Value: "test_series_a"}, - {Name: "job", Value: "test"}, - {Name: "series", Value: strconv.Itoa(i % 3)}, - {Name: "status_code", Value: statusCodes[i%5]}, - }) - - lbls = append(lbls, labels.Labels{ - {Name: labels.MetricName, Value: "test_series_b"}, - {Name: "job", Value: "test"}, - {Name: "series", Value: strconv.Itoa((i + 1) % 3)}, - {Name: "status_code", Value: statusCodes[(i+1)%5]}, - }) + lbls = append(lbls, labels.FromStrings(labels.MetricName, "test_series_a", "job", "test", "series", strconv.Itoa(i%3), "status_code", statusCodes[i%5])) + lbls = append(lbls, labels.FromStrings(labels.MetricName, "test_series_b", "job", "test", "series", strconv.Itoa((i+1)%3), "status_code", statusCodes[(i+1)%5])) } ctx := context.Background() @@ -147,7 +136,7 @@ func TestNativeHistogramFuzz(t *testing.T) { err = writeFileToSharedDir(s, "prometheus.yml", []byte("")) require.NoError(t, err) - prom := e2edb.NewPrometheus("quay.io/prometheus/prometheus:v3.3.1", nil) + prom := e2edb.NewPrometheus("", nil) require.NoError(t, s.StartAndWaitReady(prom)) c2, err := e2ecortex.NewPromQueryClient(prom.HTTPEndpoint()) @@ -221,19 +210,8 @@ func TestExperimentalPromQLFuncsWithPrometheus(t *testing.T) { scrapeInterval := time.Minute statusCodes := []string{"200", "400", "404", "500", "502"} for i := 0; i < numSeries; i++ { - lbls = append(lbls, labels.Labels{ - {Name: labels.MetricName, Value: "test_series_a"}, - {Name: "job", Value: "test"}, - {Name: "series", Value: strconv.Itoa(i % 3)}, - {Name: "status_code", Value: statusCodes[i%5]}, - }) - - lbls = append(lbls, labels.Labels{ - {Name: labels.MetricName, Value: "test_series_b"}, - {Name: "job", Value: "test"}, - {Name: "series", Value: strconv.Itoa((i + 1) % 3)}, - {Name: "status_code", Value: statusCodes[(i+1)%5]}, - }) + lbls = append(lbls, labels.FromStrings(labels.MetricName, "test_series_a", "job", "test", "series", strconv.Itoa(i%3), "status_code", statusCodes[i%5])) + lbls = append(lbls, labels.FromStrings(labels.MetricName, "test_series_b", "job", "test", "series", strconv.Itoa((i+1)%3), "status_code", statusCodes[(i+1)%5])) } ctx := context.Background() @@ -799,7 +777,7 @@ func TestVerticalShardingFuzz(t *testing.T) { } ps := promqlsmith.New(rnd, lbls, opts...) 
- runQueryFuzzTestCases(t, ps, c1, c2, now, start, end, scrapeInterval, 1000, false) + runQueryFuzzTestCases(t, ps, c1, c2, end, start, end, scrapeInterval, 1000, false) } func TestProtobufCodecFuzz(t *testing.T) { @@ -1209,13 +1187,7 @@ func TestStoreGatewayLazyExpandedPostingsSeriesFuzz(t *testing.T) { metricName := "http_requests_total" statusCodes := []string{"200", "400", "404", "500", "502"} for i := 0; i < numSeries; i++ { - lbl := labels.Labels{ - {Name: labels.MetricName, Value: metricName}, - {Name: "job", Value: "test"}, - {Name: "series", Value: strconv.Itoa(i % 200)}, - {Name: "status_code", Value: statusCodes[i%5]}, - } - lbls = append(lbls, lbl) + lbls = append(lbls, labels.FromStrings(labels.MetricName, metricName, "job", "test", "series", strconv.Itoa(i%200), "status_code", statusCodes[i%5])) } ctx := context.Background() rnd := rand.New(rand.NewSource(time.Now().Unix())) @@ -1367,13 +1339,7 @@ func TestStoreGatewayLazyExpandedPostingsSeriesFuzzWithPrometheus(t *testing.T) metricName := "http_requests_total" statusCodes := []string{"200", "400", "404", "500", "502"} for i := 0; i < numSeries; i++ { - lbl := labels.Labels{ - {Name: labels.MetricName, Value: metricName}, - {Name: "job", Value: "test"}, - {Name: "series", Value: strconv.Itoa(i % 200)}, - {Name: "status_code", Value: statusCodes[i%5]}, - } - lbls = append(lbls, lbl) + lbls = append(lbls, labels.FromStrings(labels.MetricName, metricName, "job", "test", "series", strconv.Itoa(i%200), "status_code", statusCodes[i%5])) } ctx := context.Background() rnd := rand.New(rand.NewSource(time.Now().Unix())) @@ -1673,19 +1639,8 @@ func TestPrometheusCompatibilityQueryFuzz(t *testing.T) { scrapeInterval := time.Minute statusCodes := []string{"200", "400", "404", "500", "502"} for i := 0; i < numSeries; i++ { - lbls = append(lbls, labels.Labels{ - {Name: labels.MetricName, Value: "test_series_a"}, - {Name: "job", Value: "test"}, - {Name: "series", Value: strconv.Itoa(i % 3)}, - {Name: "status_code", Value: statusCodes[i%5]}, - }) - - lbls = append(lbls, labels.Labels{ - {Name: labels.MetricName, Value: "test_series_b"}, - {Name: "job", Value: "test"}, - {Name: "series", Value: strconv.Itoa((i + 1) % 3)}, - {Name: "status_code", Value: statusCodes[(i+1)%5]}, - }) + lbls = append(lbls, labels.FromStrings(labels.MetricName, "test_series_a", "job", "test", "series", strconv.Itoa(i%3), "status_code", statusCodes[i%5])) + lbls = append(lbls, labels.FromStrings(labels.MetricName, "test_series_b", "job", "test", "series", strconv.Itoa((i+1)%3), "status_code", statusCodes[(i+1)%5])) } ctx := context.Background() @@ -1838,7 +1793,7 @@ func runQueryFuzzTestCases(t *testing.T, ps *promqlsmith.PromQLSmith, c1, c2 *e2 failures++ } } else if !cmp.Equal(tc.res1, tc.res2, comparer) { - t.Logf("case %d results mismatch.\n%s: %s\nres1: %s\nres2: %s\n", i, qt, tc.query, tc.res1.String(), tc.res2.String()) + t.Logf("case %d results mismatch.\n%s: %s\nres1 len: %d data: %s\nres2 len: %d data: %s\n", i, qt, tc.query, resultLength(tc.res1), tc.res1.String(), resultLength(tc.res2), tc.res2.String()) failures++ } } @@ -1872,3 +1827,17 @@ func isValidQuery(generatedQuery parser.Expr, skipStdAggregations bool) bool { } return isValid } + +func resultLength(x model.Value) int { + vx, xvec := x.(model.Vector) + if xvec { + return vx.Len() + } + + mx, xMatrix := x.(model.Matrix) + if xMatrix { + return mx.Len() + } + // Other type, return 0 + return 0 +} diff --git a/integration/remote_write_v2_test.go b/integration/remote_write_v2_test.go new file mode 
100644 index 0000000000..8ba26447f6 --- /dev/null +++ b/integration/remote_write_v2_test.go @@ -0,0 +1,464 @@ +//go:build integration_remote_write_v2 +// +build integration_remote_write_v2 + +package integration + +import ( + "math/rand" + "net/http" + "path" + "testing" + "time" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/prompb" + writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" + "github.com/prometheus/prometheus/tsdb/tsdbutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/cortexproject/cortex/integration/e2e" + e2edb "github.com/cortexproject/cortex/integration/e2e/db" + "github.com/cortexproject/cortex/integration/e2ecortex" + "github.com/cortexproject/cortex/pkg/storage/tsdb" +) + +func TestIngesterRollingUpdate(t *testing.T) { + // Test the ingester rolling-update scenario: -distributor.remote-writev2-enabled is true while the ingester still runs the v1.19.0 image. + // Expected: remote write 2.0 pushes succeed. + const blockRangePeriod = 5 * time.Second + ingesterImage := "quay.io/cortexproject/cortex:v1.19.0" + + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Start dependencies. + consul := e2edb.NewConsulWithName("consul") + require.NoError(t, s.StartAndWaitReady(consul)) + + flags := mergeFlags( + AlertmanagerLocalFlags(), + map[string]string{ + "-store.engine": blocksStorageEngine, + "-blocks-storage.backend": "filesystem", + "-blocks-storage.tsdb.head-compaction-interval": "4m", + "-blocks-storage.bucket-store.sync-interval": "15m", + "-blocks-storage.bucket-store.index-cache.backend": tsdb.IndexCacheBackendInMemory, + "-blocks-storage.bucket-store.bucket-index.enabled": "true", + "-querier.query-store-for-labels-enabled": "true", + "-blocks-storage.tsdb.block-ranges-period": blockRangePeriod.String(), + "-blocks-storage.tsdb.ship-interval": "1s", + "-blocks-storage.tsdb.retention-period": ((blockRangePeriod * 2) - 1).String(), + "-blocks-storage.tsdb.enable-native-histograms": "true", + // Ingester. + "-ring.store": "consul", + "-consul.hostname": consul.NetworkHTTPEndpoint(), + // Distributor. + "-distributor.replication-factor": "1", + // Store-gateway. + "-store-gateway.sharding-enabled": "false", + // alert manager + "-alertmanager.web.external-url": "http://localhost/alertmanager", + }, + ) + + distributorFlag := mergeFlags(flags, map[string]string{ + "-distributor.remote-writev2-enabled": "true", + }) + + // make alert manager config dir + require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs", []byte{})) + + path := path.Join(s.SharedDir(), "cortex-1") + + flags = mergeFlags(flags, map[string]string{"-blocks-storage.filesystem.dir": path}) + // Start all Cortex services.
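+ // The ingester intentionally runs the previous release image (v1.19.0) to simulate a rolling update; all other components run the image under test.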
+ ingester := e2ecortex.NewIngester("ingester", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, ingesterImage) + distributor := e2ecortex.NewDistributor("distributor", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), distributorFlag, "") + storeGateway := e2ecortex.NewStoreGateway("store-gateway", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + querier := e2ecortex.NewQuerier("querier", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), mergeFlags(flags, map[string]string{ + "-querier.store-gateway-addresses": storeGateway.NetworkGRPCEndpoint()}), "") + + require.NoError(t, s.StartAndWaitReady(querier, ingester, distributor, storeGateway)) + + // Wait until Cortex replicas have updated the ring state. + require.NoError(t, distributor.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total")) + require.NoError(t, querier.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total")) + + c, err := e2ecortex.NewClient(distributor.HTTPEndpoint(), querier.HTTPEndpoint(), "", "", "user-1") + require.NoError(t, err) + + now := time.Now() + + // series push + symbols1, series, expectedVector := e2e.GenerateSeriesV2("test_series", now, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "foo", Value: "bar"}) + res, err := c.PushV2(symbols1, series) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + testPushHeader(t, res.Header, "1", "0", "0") + + // sample + result, err := c.Query("test_series", now) + require.NoError(t, err) + assert.Equal(t, expectedVector, result.(model.Vector)) + + // metadata + metadata, err := c.Metadata("test_series", "") + require.NoError(t, err) + require.Equal(t, 1, len(metadata["test_series"])) + + // histogram + histogramIdx := rand.Uint32() + symbols2, histogramSeries := e2e.GenerateHistogramSeriesV2("test_histogram", now, histogramIdx, false, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "float", Value: "false"}) + res, err = c.PushV2(symbols2, histogramSeries) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + testPushHeader(t, res.Header, "0", "1", "0") + + symbols3, histogramFloatSeries := e2e.GenerateHistogramSeriesV2("test_histogram", now, histogramIdx, true, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "float", Value: "true"}) + res, err = c.PushV2(symbols3, histogramFloatSeries) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + testPushHeader(t, res.Header, "0", "1", "0") + + testHistogramTimestamp := now.Add(blockRangePeriod * 2) + expectedHistogram := tsdbutil.GenerateTestHistogram(int64(histogramIdx)) + result, err = c.Query(`test_histogram`, testHistogramTimestamp) + require.NoError(t, err) + require.Equal(t, model.ValVector, result.Type()) + v := result.(model.Vector) + require.Equal(t, 2, v.Len()) + for _, s := range v { + require.NotNil(t, s.Histogram) + require.Equal(t, float64(expectedHistogram.Count), float64(s.Histogram.Count)) + require.Equal(t, float64(expectedHistogram.Sum), float64(s.Histogram.Sum)) + } +} + +func TestIngest_SenderSendPRW2_DistributorNotAllowPRW2(t *testing.T) { + // Test the case where `-distributor.remote-writev2-enabled` is false but the sender pushes PRW2. + // Expected: status code is 200, but samples are not written. + const blockRangePeriod = 5 * time.Second + + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Start dependencies. 
+ consul := e2edb.NewConsulWithName("consul") + require.NoError(t, s.StartAndWaitReady(consul)) + + flags := mergeFlags( + AlertmanagerLocalFlags(), + map[string]string{ + "-store.engine": blocksStorageEngine, + "-blocks-storage.backend": "filesystem", + "-blocks-storage.tsdb.head-compaction-interval": "4m", + "-blocks-storage.bucket-store.sync-interval": "15m", + "-blocks-storage.bucket-store.index-cache.backend": tsdb.IndexCacheBackendInMemory, + "-blocks-storage.bucket-store.bucket-index.enabled": "true", + "-querier.query-store-for-labels-enabled": "true", + "-blocks-storage.tsdb.block-ranges-period": blockRangePeriod.String(), + "-blocks-storage.tsdb.ship-interval": "1s", + "-blocks-storage.tsdb.retention-period": ((blockRangePeriod * 2) - 1).String(), + "-blocks-storage.tsdb.enable-native-histograms": "true", + // Ingester. + "-ring.store": "consul", + "-consul.hostname": consul.NetworkHTTPEndpoint(), + // Distributor. + "-distributor.replication-factor": "1", + "-distributor.remote-writev2-enabled": "false", + // Store-gateway. + "-store-gateway.sharding-enabled": "false", + // alert manager + "-alertmanager.web.external-url": "http://localhost/alertmanager", + }, + ) + + // make alert manager config dir + require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs", []byte{})) + + path := path.Join(s.SharedDir(), "cortex-1") + + flags = mergeFlags(flags, map[string]string{"-blocks-storage.filesystem.dir": path}) + // Start Cortex replicas. + cortex := e2ecortex.NewSingleBinary("cortex", flags, "") + require.NoError(t, s.StartAndWaitReady(cortex)) + + // Wait until Cortex replicas have updated the ring state. + require.NoError(t, cortex.WaitSumMetrics(e2e.Equals(float64(512)), "cortex_ring_tokens_total")) + + c, err := e2ecortex.NewClient(cortex.HTTPEndpoint(), cortex.HTTPEndpoint(), "", "", "user-1") + require.NoError(t, err) + + now := time.Now() + + // series push + symbols1, series, _ := e2e.GenerateSeriesV2("test_series", now, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "foo", Value: "bar"}) + res, err := c.PushV2(symbols1, series) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + + // sample + result, err := c.Query("test_series", now) + require.NoError(t, err) + require.Empty(t, result) +} + +func TestIngest(t *testing.T) { + const blockRangePeriod = 5 * time.Second + + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Start dependencies. + consul := e2edb.NewConsulWithName("consul") + require.NoError(t, s.StartAndWaitReady(consul)) + + flags := mergeFlags( + AlertmanagerLocalFlags(), + map[string]string{ + "-store.engine": blocksStorageEngine, + "-blocks-storage.backend": "filesystem", + "-blocks-storage.tsdb.head-compaction-interval": "4m", + "-blocks-storage.bucket-store.sync-interval": "15m", + "-blocks-storage.bucket-store.index-cache.backend": tsdb.IndexCacheBackendInMemory, + "-blocks-storage.bucket-store.bucket-index.enabled": "true", + "-querier.query-store-for-labels-enabled": "true", + "-blocks-storage.tsdb.block-ranges-period": blockRangePeriod.String(), + "-blocks-storage.tsdb.ship-interval": "1s", + "-blocks-storage.tsdb.retention-period": ((blockRangePeriod * 2) - 1).String(), + "-blocks-storage.tsdb.enable-native-histograms": "true", + // Ingester. + "-ring.store": "consul", + "-consul.hostname": consul.NetworkHTTPEndpoint(), + // Distributor. + "-distributor.replication-factor": "1", + "-distributor.remote-writev2-enabled": "true", + // Store-gateway. 
+ "-store-gateway.sharding-enabled": "false", + // alert manager + "-alertmanager.web.external-url": "http://localhost/alertmanager", + }, + ) + + // make alert manager config dir + require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs", []byte{})) + + path := path.Join(s.SharedDir(), "cortex-1") + + flags = mergeFlags(flags, map[string]string{"-blocks-storage.filesystem.dir": path}) + // Start Cortex replicas. + cortex := e2ecortex.NewSingleBinary("cortex", flags, "") + require.NoError(t, s.StartAndWaitReady(cortex)) + + // Wait until Cortex replicas have updated the ring state. + require.NoError(t, cortex.WaitSumMetrics(e2e.Equals(float64(512)), "cortex_ring_tokens_total")) + + c, err := e2ecortex.NewClient(cortex.HTTPEndpoint(), cortex.HTTPEndpoint(), "", "", "user-1") + require.NoError(t, err) + + now := time.Now() + + // series push + symbols1, series, expectedVector := e2e.GenerateSeriesV2("test_series", now, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "foo", Value: "bar"}) + res, err := c.PushV2(symbols1, series) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + testPushHeader(t, res.Header, "1", "0", "0") + + // sample + result, err := c.Query("test_series", now) + require.NoError(t, err) + assert.Equal(t, expectedVector, result.(model.Vector)) + + // metadata + metadata, err := c.Metadata("test_series", "") + require.NoError(t, err) + require.Equal(t, 1, len(metadata["test_series"])) + + // histogram + histogramIdx := rand.Uint32() + symbols2, histogramSeries := e2e.GenerateHistogramSeriesV2("test_histogram", now, histogramIdx, false, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "float", Value: "false"}) + res, err = c.PushV2(symbols2, histogramSeries) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + testPushHeader(t, res.Header, "0", "1", "0") + + // float histogram + symbols3, histogramFloatSeries := e2e.GenerateHistogramSeriesV2("test_histogram", now, histogramIdx, true, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "float", Value: "true"}) + res, err = c.PushV2(symbols3, histogramFloatSeries) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + testPushHeader(t, res.Header, "0", "1", "0") + + testHistogramTimestamp := now.Add(blockRangePeriod * 2) + expectedHistogram := tsdbutil.GenerateTestHistogram(int64(histogramIdx)) + result, err = c.Query(`test_histogram`, testHistogramTimestamp) + require.NoError(t, err) + require.Equal(t, model.ValVector, result.Type()) + v := result.(model.Vector) + require.Equal(t, 2, v.Len()) + for _, s := range v { + require.NotNil(t, s.Histogram) + require.Equal(t, float64(expectedHistogram.Count), float64(s.Histogram.Count)) + require.Equal(t, float64(expectedHistogram.Sum), float64(s.Histogram.Sum)) + } +} + +func TestExemplar(t *testing.T) { + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Start dependencies. 
+ consul := e2edb.NewConsulWithName("consul") + require.NoError(t, s.StartAndWaitReady(consul)) + + flags := mergeFlags( + AlertmanagerLocalFlags(), + map[string]string{ + "-store.engine": blocksStorageEngine, + "-blocks-storage.backend": "filesystem", + "-blocks-storage.tsdb.head-compaction-interval": "4m", + "-blocks-storage.bucket-store.sync-interval": "15m", + "-blocks-storage.bucket-store.index-cache.backend": tsdb.IndexCacheBackendInMemory, + "-blocks-storage.bucket-store.bucket-index.enabled": "true", + "-querier.query-store-for-labels-enabled": "true", + "-blocks-storage.tsdb.ship-interval": "1s", + "-blocks-storage.tsdb.enable-native-histograms": "true", + // Ingester. + "-ring.store": "consul", + "-consul.hostname": consul.NetworkHTTPEndpoint(), + "-ingester.max-exemplars": "100", + // Distributor. + "-distributor.replication-factor": "1", + "-distributor.remote-writev2-enabled": "true", + // Store-gateway. + "-store-gateway.sharding-enabled": "false", + // alert manager + "-alertmanager.web.external-url": "http://localhost/alertmanager", + }, + ) + + // make alert manager config dir + require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs", []byte{})) + + path := path.Join(s.SharedDir(), "cortex-1") + + flags = mergeFlags(flags, map[string]string{"-blocks-storage.filesystem.dir": path}) + // Start Cortex replicas. + cortex := e2ecortex.NewSingleBinary("cortex", flags, "") + require.NoError(t, s.StartAndWaitReady(cortex)) + + // Wait until Cortex replicas have updated the ring state. + require.NoError(t, cortex.WaitSumMetrics(e2e.Equals(float64(512)), "cortex_ring_tokens_total")) + + c, err := e2ecortex.NewClient(cortex.HTTPEndpoint(), cortex.HTTPEndpoint(), "", "", "user-1") + require.NoError(t, err) + + now := time.Now() + tsMillis := e2e.TimeToMilliseconds(now) + + symbols := []string{"", "__name__", "test_metric", "b", "c", "baz", "qux", "d", "e", "foo", "bar", "f", "g", "h", "i", "Test gauge for test purposes", "Maybe op/sec who knows (:", "Test counter for test purposes"} + timeseries := []writev2.TimeSeries{ + { + LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, // Symbolized writeRequestFixture.Timeseries[0].Labels + Metadata: writev2.Metadata{ + Type: writev2.Metadata_METRIC_TYPE_COUNTER, // writeV2RequestSeries1Metadata.Type. + + HelpRef: 15, // Symbolized writeV2RequestSeries1Metadata.Help. + UnitRef: 16, // Symbolized writeV2RequestSeries1Metadata.Unit. + }, + Samples: []writev2.Sample{{Value: 1, Timestamp: tsMillis}}, + Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{11, 12}, Value: 1, Timestamp: tsMillis}}, + }, + } + + res, err := c.PushV2(symbols, timeseries) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + testPushHeader(t, res.Header, "1", "0", "1") + + start := time.Now().Add(-time.Minute) + end := now.Add(time.Minute) + + exemplars, err := c.QueryExemplars("test_metric", start, end) + require.NoError(t, err) + require.Equal(t, 1, len(exemplars)) +} + +func Test_WriteStatWithReplication(t *testing.T) { + // Test the `X-Prometheus-Remote-Write-Samples-Written` header value + // with replication enabled. + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Start dependencies. 
+ consul := e2edb.NewConsulWithName("consul") + require.NoError(t, s.StartAndWaitReady(consul)) + + flags := mergeFlags( + AlertmanagerLocalFlags(), + map[string]string{ + "-store.engine": blocksStorageEngine, + "-blocks-storage.backend": "filesystem", + "-blocks-storage.tsdb.head-compaction-interval": "4m", + "-blocks-storage.bucket-store.sync-interval": "15m", + "-blocks-storage.bucket-store.index-cache.backend": tsdb.IndexCacheBackendInMemory, + "-blocks-storage.bucket-store.bucket-index.enabled": "true", + "-querier.query-store-for-labels-enabled": "true", + "-blocks-storage.tsdb.ship-interval": "1s", + "-blocks-storage.tsdb.enable-native-histograms": "true", + // Ingester. + "-ring.store": "consul", + "-consul.hostname": consul.NetworkHTTPEndpoint(), + "-ingester.max-exemplars": "100", + // Distributor. + "-distributor.replication-factor": "3", + "-distributor.remote-writev2-enabled": "true", + // Store-gateway. + "-store-gateway.sharding-enabled": "false", + // alert manager + "-alertmanager.web.external-url": "http://localhost/alertmanager", + }, + ) + + // Start Cortex components. + distributor := e2ecortex.NewDistributor("distributor", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + ingester1 := e2ecortex.NewIngester("ingester-1", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + ingester2 := e2ecortex.NewIngester("ingester-2", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + ingester3 := e2ecortex.NewIngester("ingester-3", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + require.NoError(t, s.StartAndWaitReady(distributor, ingester1, ingester2, ingester3)) + + // Wait until the distributor sees all three ingesters ACTIVE in the ring. + require.NoError(t, distributor.WaitSumMetricsWithOptions(e2e.Equals(3), []string{"cortex_ring_members"}, e2e.WithLabelMatchers( + labels.MustNewMatcher(labels.MatchEqual, "name", "ingester"), + labels.MustNewMatcher(labels.MatchEqual, "state", "ACTIVE")))) + + c, err := e2ecortex.NewClient(distributor.HTTPEndpoint(), "", "", "", "user-1") + require.NoError(t, err) + + now := time.Now() + + // series push + start := now.Add(-time.Minute * 10) + numSamples := 20 + scrapeInterval := 30 * time.Second + symbols, series := e2e.GenerateV2SeriesWithSamples("test_series", start, scrapeInterval, 0, numSamples, prompb.Label{Name: "job", Value: "test"}) + res, err := c.PushV2(symbols, []writev2.TimeSeries{series}) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + testPushHeader(t, res.Header, "20", "0", "0") +} + +func testPushHeader(t *testing.T, header http.Header, expectedSamples, expectedHistogram, expectedExemplars string) { + require.Equal(t, expectedSamples, header.Get("X-Prometheus-Remote-Write-Samples-Written")) + require.Equal(t, expectedHistogram, header.Get("X-Prometheus-Remote-Write-Histograms-Written")) + require.Equal(t, expectedExemplars, header.Get("X-Prometheus-Remote-Write-Exemplars-Written")) +} diff --git a/integration/ruler_test.go b/integration/ruler_test.go index 5a9a2d4261..48bdaff551 100644 --- a/integration/ruler_test.go +++ b/integration/ruler_test.go @@ -504,14 +504,14 @@ func testRulerAPIWithSharding(t *testing.T, enableRulesBackup bool) { assert.NoError(t, json.Unmarshal(responseJson, ar)) if !ar.LastEvaluation.IsZero() { // Labels will be merged only if groups are loaded to Prometheus rule manager - assert.Equal(t, 5, len(ar.Labels)) + assert.Equal(t, 5, ar.Labels.Len()) } - for _, label := range ar.Labels { - if label.Name == "duplicate_label" { + 
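// Iterate with Range, since labels must be treated as opaque with the newer Prometheus labels API. + 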
ar.Labels.Range(func(l labels.Label) { + if l.Name == "duplicate_label" { // rule label should override group label - assert.Equal(t, ruleLabels["duplicate_label"], label.Value) + assert.Equal(t, ruleLabels["duplicate_label"], l.Value) } - } + }) } }, }, diff --git a/pkg/alertmanager/alertmanager_http.go b/pkg/alertmanager/alertmanager_http.go index 1b27ef7b9e..2a313b3700 100644 --- a/pkg/alertmanager/alertmanager_http.go +++ b/pkg/alertmanager/alertmanager_http.go @@ -96,12 +96,12 @@ type StatusHandler struct { // ServeHTTP serves the status of the alertmanager. func (s StatusHandler) ServeHTTP(w http.ResponseWriter, _ *http.Request) { - var clusterInfo map[string]interface{} + var clusterInfo map[string]any if s.am.peer != nil { clusterInfo = s.am.peer.Info() } err := statusTemplate.Execute(w, struct { - ClusterInfo map[string]interface{} + ClusterInfo map[string]any }{ ClusterInfo: clusterInfo, }) diff --git a/pkg/alertmanager/alertmanager_http_test.go b/pkg/alertmanager/alertmanager_http_test.go index 987221593a..126de01695 100644 --- a/pkg/alertmanager/alertmanager_http_test.go +++ b/pkg/alertmanager/alertmanager_http_test.go @@ -1,7 +1,6 @@ package alertmanager import ( - "context" "io" "net/http/httptest" "testing" @@ -14,8 +13,7 @@ import ( ) func TestMultitenantAlertmanager_GetStatusHandler(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() var peer *cluster.Peer { logger := promslog.NewNopLogger() diff --git a/pkg/alertmanager/alertmanager_ring.go b/pkg/alertmanager/alertmanager_ring.go index 90430137b0..33d72daeeb 100644 --- a/pkg/alertmanager/alertmanager_ring.go +++ b/pkg/alertmanager/alertmanager_ring.go @@ -43,12 +43,13 @@ var SyncRingOp = ring.NewOp([]ring.InstanceState{ring.ACTIVE, ring.JOINING}, fun // is used to strip down the config to the minimum, and avoid confusion // to the user. type RingConfig struct { - KVStore kv.Config `yaml:"kvstore" doc:"description=The key-value store used to share the hash ring across multiple instances."` - HeartbeatPeriod time.Duration `yaml:"heartbeat_period"` - HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"` - ReplicationFactor int `yaml:"replication_factor"` - ZoneAwarenessEnabled bool `yaml:"zone_awareness_enabled"` - TokensFilePath string `yaml:"tokens_file_path"` + KVStore kv.Config `yaml:"kvstore" doc:"description=The key-value store used to share the hash ring across multiple instances."` + HeartbeatPeriod time.Duration `yaml:"heartbeat_period"` + HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"` + ReplicationFactor int `yaml:"replication_factor"` + ZoneAwarenessEnabled bool `yaml:"zone_awareness_enabled"` + TokensFilePath string `yaml:"tokens_file_path"` + DetailedMetricsEnabled bool `yaml:"detailed_metrics_enabled"` FinalSleep time.Duration `yaml:"final_sleep"` WaitInstanceStateTimeout time.Duration `yaml:"wait_instance_state_timeout"` @@ -88,6 +89,7 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.ReplicationFactor, rfprefix+"replication-factor", 3, "The replication factor to use when sharding the alertmanager.") f.BoolVar(&cfg.ZoneAwarenessEnabled, rfprefix+"zone-awareness-enabled", false, "True to enable zone-awareness and replicate alerts across different availability zones.") f.StringVar(&cfg.TokensFilePath, rfprefix+"tokens-file-path", "", "File path where tokens are stored. 
If empty, tokens are not stored at shutdown and restored at startup.") + f.BoolVar(&cfg.DetailedMetricsEnabled, rfprefix+"detailed-metrics-enabled", true, "Set to true to enable ring detailed metrics. These metrics provide detailed information, such as token count and ownership per tenant. Disabling them can significantly decrease the number of metrics emitted.") // Instance flags cfg.InstanceInterfaceNames = []string{"eth0", "en0"} @@ -134,6 +136,7 @@ func (cfg *RingConfig) ToRingConfig() ring.Config { rc.HeartbeatTimeout = cfg.HeartbeatTimeout rc.ReplicationFactor = cfg.ReplicationFactor rc.ZoneAwarenessEnabled = cfg.ZoneAwarenessEnabled + rc.DetailedMetricsEnabled = cfg.DetailedMetricsEnabled return rc } diff --git a/pkg/alertmanager/alertmanager_ring_test.go b/pkg/alertmanager/alertmanager_ring_test.go index 3e4d460252..ec1f3008fa 100644 --- a/pkg/alertmanager/alertmanager_ring_test.go +++ b/pkg/alertmanager/alertmanager_ring_test.go @@ -45,7 +45,6 @@ func TestIsHealthyForAlertmanagerOperations(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { actual := testData.instance.IsHealthy(RingOp, testData.timeout, time.Now()) diff --git a/pkg/alertmanager/alertmanager_test.go b/pkg/alertmanager/alertmanager_test.go index c4ed3064fa..54d4cd4584 100644 --- a/pkg/alertmanager/alertmanager_test.go +++ b/pkg/alertmanager/alertmanager_test.go @@ -48,7 +48,7 @@ func TestSilencesLimits(t *testing.T) { } // create silences up to maxSilencesCount - for i := 0; i < maxSilencesCount; i++ { + for range maxSilencesCount { err := am.silences.Set(createSilences()) require.NoError(t, err) } @@ -136,7 +136,7 @@ route: now := time.Now() - for i := 0; i < alertGroups; i++ { + for i := range alertGroups { alertName := model.LabelValue(fmt.Sprintf("Alert-%d", i)) inputAlerts := []*types.Alert{ @@ -174,7 +174,7 @@ route: } // Give it some time, as alerts are sent to dispatcher asynchronously. - test.Poll(t, 3*time.Second, nil, func() interface{} { + test.Poll(t, 3*time.Second, nil, func() any { return testutil.GatherAndCompare(reg, strings.NewReader(fmt.Sprintf(` # HELP alertmanager_dispatcher_aggregation_group_limit_reached_total Number of times when dispatcher failed to create new aggregation group due to limit. 
# TYPE alertmanager_dispatcher_aggregation_group_limit_reached_total counter diff --git a/pkg/alertmanager/alertstore/bucketclient/bucket_client.go b/pkg/alertmanager/alertstore/bucketclient/bucket_client.go index 4252f6703b..7a2d3dad2b 100644 --- a/pkg/alertmanager/alertstore/bucketclient/bucket_client.go +++ b/pkg/alertmanager/alertstore/bucketclient/bucket_client.go @@ -75,7 +75,7 @@ func (s *BucketAlertStore) GetAlertConfigs(ctx context.Context, userIDs []string cfgs = make(map[string]alertspb.AlertConfigDesc, len(userIDs)) ) - err := concurrency.ForEach(ctx, concurrency.CreateJobsFromStrings(userIDs), fetchConcurrency, func(ctx context.Context, job interface{}) error { + err := concurrency.ForEach(ctx, concurrency.CreateJobsFromStrings(userIDs), fetchConcurrency, func(ctx context.Context, job any) error { userID := job.(string) cfg, uBucket, err := s.getAlertConfig(ctx, userID) diff --git a/pkg/alertmanager/alertstore/config.go b/pkg/alertmanager/alertstore/config.go index bca00768d7..5d32e6dd9e 100644 --- a/pkg/alertmanager/alertstore/config.go +++ b/pkg/alertmanager/alertstore/config.go @@ -2,6 +2,7 @@ package alertstore import ( "flag" + "slices" "github.com/cortexproject/cortex/pkg/alertmanager/alertstore/configdb" "github.com/cortexproject/cortex/pkg/alertmanager/alertstore/local" @@ -28,10 +29,5 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { // IsFullStateSupported returns if the given configuration supports access to FullState objects. func (cfg *Config) IsFullStateSupported() bool { - for _, backend := range bucket.SupportedBackends { - if cfg.Backend == backend { - return true - } - } - return false + return slices.Contains(bucket.SupportedBackends, cfg.Backend) } diff --git a/pkg/alertmanager/alertstore/store_test.go b/pkg/alertmanager/alertstore/store_test.go index fd7fb2816a..2796b6ed04 100644 --- a/pkg/alertmanager/alertstore/store_test.go +++ b/pkg/alertmanager/alertstore/store_test.go @@ -21,7 +21,7 @@ var ( ) func TestAlertStore_ListAllUsers(t *testing.T) { - runForEachAlertStore(t, func(t *testing.T, store AlertStore, m *mockBucket, client interface{}) { + runForEachAlertStore(t, func(t *testing.T, store AlertStore, m *mockBucket, client any) { ctx := context.Background() user1Cfg := alertspb.AlertConfigDesc{User: "user-1", RawConfig: "content-1"} user2Cfg := alertspb.AlertConfigDesc{User: "user-2", RawConfig: "content-2"} @@ -46,7 +46,7 @@ func TestAlertStore_ListAllUsers(t *testing.T) { } func TestAlertStore_SetAndGetAlertConfig(t *testing.T) { - runForEachAlertStore(t, func(t *testing.T, store AlertStore, m *mockBucket, client interface{}) { + runForEachAlertStore(t, func(t *testing.T, store AlertStore, m *mockBucket, client any) { ctx := context.Background() user1Cfg := alertspb.AlertConfigDesc{User: "user-1", RawConfig: "content-1"} user2Cfg := alertspb.AlertConfigDesc{User: "user-2", RawConfig: "content-2"} @@ -84,7 +84,7 @@ func TestAlertStore_SetAndGetAlertConfig(t *testing.T) { } func TestStore_GetAlertConfigs(t *testing.T) { - runForEachAlertStore(t, func(t *testing.T, store AlertStore, m *mockBucket, client interface{}) { + runForEachAlertStore(t, func(t *testing.T, store AlertStore, m *mockBucket, client any) { ctx := context.Background() user1Cfg := alertspb.AlertConfigDesc{User: "user-1", RawConfig: "content-1"} user2Cfg := alertspb.AlertConfigDesc{User: "user-2", RawConfig: "content-2"} @@ -129,7 +129,7 @@ func TestStore_GetAlertConfigs(t *testing.T) { } func TestAlertStore_DeleteAlertConfig(t *testing.T) { - runForEachAlertStore(t, func(t 
*testing.T, store AlertStore, m *mockBucket, client interface{}) { + runForEachAlertStore(t, func(t *testing.T, store AlertStore, m *mockBucket, client any) { ctx := context.Background() user1Cfg := alertspb.AlertConfigDesc{User: "user-1", RawConfig: "content-1"} user2Cfg := alertspb.AlertConfigDesc{User: "user-2", RawConfig: "content-2"} @@ -169,14 +169,14 @@ func TestAlertStore_DeleteAlertConfig(t *testing.T) { }) } -func runForEachAlertStore(t *testing.T, testFn func(t *testing.T, store AlertStore, b *mockBucket, client interface{})) { +func runForEachAlertStore(t *testing.T, testFn func(t *testing.T, store AlertStore, b *mockBucket, client any)) { bucketClient := objstore.NewInMemBucket() mBucketClient := &mockBucket{Bucket: bucketClient} bucketStore := bucketclient.NewBucketAlertStore(mBucketClient, nil, log.NewNopLogger()) stores := map[string]struct { store AlertStore - client interface{} + client any }{ "bucket": {store: bucketStore, client: mBucketClient}, } @@ -188,7 +188,7 @@ func runForEachAlertStore(t *testing.T, testFn func(t *testing.T, store AlertSto } } -func objectExists(bucketClient interface{}, key string) (bool, error) { +func objectExists(bucketClient any, key string) (bool, error) { if typed, ok := bucketClient.(objstore.Bucket); ok { return typed.Exists(context.Background(), key) } diff --git a/pkg/alertmanager/api.go b/pkg/alertmanager/api.go index f546bbd4ce..cbac5bd89c 100644 --- a/pkg/alertmanager/api.go +++ b/pkg/alertmanager/api.go @@ -283,7 +283,7 @@ func (am *MultitenantAlertmanager) ListAllConfigs(w http.ResponseWriter, r *http } done := make(chan struct{}) - iter := make(chan interface{}) + iter := make(chan any) go func() { util.StreamWriteYAMLResponse(w, iter, logger) @@ -321,7 +321,7 @@ func (am *MultitenantAlertmanager) ListAllConfigs(w http.ResponseWriter, r *http // validateAlertmanagerConfig recursively scans the input config looking for data types for which // we have a specific validation and, whenever encountered, it runs their validation. Returns the // first error or nil if validation succeeds. -func validateAlertmanagerConfig(cfg interface{}) error { +func validateAlertmanagerConfig(cfg any) error { v := reflect.ValueOf(cfg) t := v.Type() diff --git a/pkg/alertmanager/api_test.go b/pkg/alertmanager/api_test.go index e70af95247..8c0a097d84 100644 --- a/pkg/alertmanager/api_test.go +++ b/pkg/alertmanager/api_test.go @@ -867,7 +867,7 @@ receivers: func TestValidateAlertmanagerConfig(t *testing.T) { tests := map[string]struct { - input interface{} + input any expected error }{ "*HTTPClientConfig": { diff --git a/pkg/alertmanager/distributor_test.go b/pkg/alertmanager/distributor_test.go index beb2277e74..fed453c3a8 100644 --- a/pkg/alertmanager/distributor_test.go +++ b/pkg/alertmanager/distributor_test.go @@ -287,7 +287,7 @@ func TestDistributor_DistributeRequest(t *testing.T) { // Since the response is sent as soon as the quorum is reached, when we // reach this point the 3rd AM may not have received the request yet. // To avoid flaky test we retry until we hit the desired state within a reasonable timeout. 
- test.Poll(t, time.Second, c.expectedTotalCalls, func() interface{} { + test.Poll(t, time.Second, c.expectedTotalCalls, func() any { totalReqCount := 0 for _, a := range ams { reqCount := a.requestsCount(route) @@ -306,7 +306,7 @@ func TestDistributor_DistributeRequest(t *testing.T) { func prepare(t *testing.T, numAM, numHappyAM, replicationFactor int, responseBody []byte) (*Distributor, []*mockAlertmanager, func()) { ams := []*mockAlertmanager{} remainingFailure := atomic.NewInt32(int32(numAM - numHappyAM)) - for i := 0; i < numAM; i++ { + for i := range numAM { ams = append(ams, newMockAlertmanager(i, remainingFailure, responseBody)) } @@ -329,7 +329,7 @@ func prepare(t *testing.T, numAM, numHappyAM, replicationFactor int, responseBod t.Cleanup(func() { assert.NoError(t, closer.Close()) }) err := kvStore.CAS(context.Background(), RingKey, - func(_ interface{}) (interface{}, bool, error) { + func(_ any) (any, bool, error) { return &ring.Desc{ Ingesters: amDescs, }, true, nil @@ -346,7 +346,7 @@ func prepare(t *testing.T, numAM, numHappyAM, replicationFactor int, responseBod }, RingNameForServer, RingKey, nil, nil) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), amRing)) - test.Poll(t, time.Second, numAM, func() interface{} { + test.Poll(t, time.Second, numAM, func() any { return amRing.InstancesCount() }) diff --git a/pkg/alertmanager/multitenant.go b/pkg/alertmanager/multitenant.go index 47b02d36d1..1a3e2b3c07 100644 --- a/pkg/alertmanager/multitenant.go +++ b/pkg/alertmanager/multitenant.go @@ -1153,7 +1153,7 @@ func (am *MultitenantAlertmanager) ReadFullStateForUser(ctx context.Context, use // Note that the jobs swallow the errors - this is because we want to give each replica a chance to respond. jobs := concurrency.CreateJobsFromStrings(addrs) - err = concurrency.ForEach(ctx, jobs, len(jobs), func(ctx context.Context, job interface{}) error { + err = concurrency.ForEach(ctx, jobs, len(jobs), func(ctx context.Context, job any) error { addr := job.(string) level.Debug(am.logger).Log("msg", "contacting replica for full state", "user", userID, "addr", addr) diff --git a/pkg/alertmanager/multitenant_test.go b/pkg/alertmanager/multitenant_test.go index acbf9eb66a..fe00b3c94e 100644 --- a/pkg/alertmanager/multitenant_test.go +++ b/pkg/alertmanager/multitenant_test.go @@ -195,7 +195,7 @@ receivers: reg := prometheus.NewPedanticRegistry() am, err := createMultitenantAlertmanager(cfg, nil, nil, store, nil, nil, log.NewNopLogger(), reg) require.NoError(t, err) - for i := 0; i < 5; i++ { + for range 5 { err = am.loadAndSyncConfigs(context.Background(), reasonPeriodic) require.NoError(t, err) require.Len(t, am.alertmanagers, 2) @@ -1128,7 +1128,7 @@ func TestMultitenantAlertmanager_InitialSyncWithSharding(t *testing.T) { // Setup the initial instance state in the ring. 
if tt.existing { - require.NoError(t, ringStore.CAS(ctx, RingKey, func(in interface{}) (interface{}, bool, error) { + require.NoError(t, ringStore.CAS(ctx, RingKey, func(in any) (any, bool, error) { ringDesc := ring.GetOrCreateRingDesc(in) ringDesc.AddIngester(amConfig.ShardingRing.InstanceID, amConfig.ShardingRing.InstanceAddr, "", tt.initialTokens, tt.initialState, time.Now()) return ringDesc, true, nil @@ -1529,7 +1529,7 @@ func TestMultitenantAlertmanager_SyncOnRingTopologyChanges(t *testing.T) { am, err := createMultitenantAlertmanager(amConfig, nil, nil, alertStore, ringStore, nil, log.NewNopLogger(), reg) require.NoError(t, err) - require.NoError(t, ringStore.CAS(ctx, RingKey, func(in interface{}) (interface{}, bool, error) { + require.NoError(t, ringStore.CAS(ctx, RingKey, func(in any) (any, bool, error) { ringDesc := ring.GetOrCreateRingDesc(in) tt.setupRing(ringDesc) return ringDesc, true, nil @@ -1545,7 +1545,7 @@ func TestMultitenantAlertmanager_SyncOnRingTopologyChanges(t *testing.T) { assert.Equal(t, float64(1), metrics.GetSumOfCounters("cortex_alertmanager_sync_configs_total")) // Change the ring topology. - require.NoError(t, ringStore.CAS(ctx, RingKey, func(in interface{}) (interface{}, bool, error) { + require.NoError(t, ringStore.CAS(ctx, RingKey, func(in any) (any, bool, error) { ringDesc := ring.GetOrCreateRingDesc(in) tt.updateRing(ringDesc) return ringDesc, true, nil @@ -1556,7 +1556,7 @@ func TestMultitenantAlertmanager_SyncOnRingTopologyChanges(t *testing.T) { if tt.expected { expectedSyncs++ } - test.Poll(t, 3*time.Second, float64(expectedSyncs), func() interface{} { + test.Poll(t, 3*time.Second, float64(expectedSyncs), func() any { metrics := regs.BuildMetricFamiliesPerUser() return metrics.GetSumOfCounters("cortex_alertmanager_sync_configs_total") }) @@ -1584,7 +1584,7 @@ func TestMultitenantAlertmanager_RingLifecyclerShouldAutoForgetUnhealthyInstance defer services.StopAndAwaitTerminated(ctx, am) //nolint:errcheck tg := ring.NewRandomTokenGenerator() - require.NoError(t, ringStore.CAS(ctx, RingKey, func(in interface{}) (interface{}, bool, error) { + require.NoError(t, ringStore.CAS(ctx, RingKey, func(in any) (any, bool, error) { ringDesc := ring.GetOrCreateRingDesc(in) instance := ringDesc.AddIngester(unhealthyInstanceID, "127.0.0.1", "", tg.GenerateTokens(ringDesc, unhealthyInstanceID, "", RingNumTokens, true), ring.ACTIVE, time.Now()) instance.Timestamp = time.Now().Add(-(ringAutoForgetUnhealthyPeriods + 1) * heartbeatTimeout).Unix() @@ -1593,7 +1593,7 @@ func TestMultitenantAlertmanager_RingLifecyclerShouldAutoForgetUnhealthyInstance return ringDesc, true, nil })) - test.Poll(t, time.Second, false, func() interface{} { + test.Poll(t, time.Second, false, func() any { d, err := ringStore.Get(ctx, RingKey) if err != nil { return err @@ -2066,7 +2066,7 @@ func TestAlertmanager_StateReplicationWithSharding_InitialSyncFromPeers(t *testi } // 2.c. Wait for the silence replication to be attempted; note this is asynchronous. 
{ - test.Poll(t, 5*time.Second, float64(1), func() interface{} { + test.Poll(t, 5*time.Second, float64(1), func() any { metrics := registries.BuildMetricFamiliesPerUser() return metrics.GetSumOfCounters("cortex_alertmanager_state_replication_total") }) diff --git a/pkg/alertmanager/rate_limited_notifier_test.go b/pkg/alertmanager/rate_limited_notifier_test.go index 1d35c9d99a..28de624cb5 100644 --- a/pkg/alertmanager/rate_limited_notifier_test.go +++ b/pkg/alertmanager/rate_limited_notifier_test.go @@ -44,7 +44,7 @@ func runNotifications(t *testing.T, rateLimitedNotifier *rateLimitedNotifier, co success := 0 rateLimited := 0 - for i := 0; i < count; i++ { + for range count { retry, err := rateLimitedNotifier.Notify(context.Background(), &types.Alert{}) switch err { diff --git a/pkg/api/api.go b/pkg/api/api.go index ec02f72e76..e124fec3e6 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -45,7 +45,7 @@ import ( // DistributorPushWrapper wraps around a push. It is similar to middleware.Interface. type DistributorPushWrapper func(next push.Func) push.Func -type ConfigHandler func(actualCfg interface{}, defaultCfg interface{}) http.HandlerFunc +type ConfigHandler func(actualCfg any, defaultCfg any) http.HandlerFunc type Config struct { ResponseCompression bool `yaml:"response_compression_enabled"` @@ -71,6 +71,10 @@ type Config struct { // Allows and is used to configure the addition of HTTP Header fields to logs HTTPRequestHeadersToLog flagext.StringSlice `yaml:"http_request_headers_to_log"` + // HTTP header that can be used as request id. It will always be included in logs + // If it's not provided, or this header is empty, then random requestId will be generated + RequestIdHeader string `yaml:"request_id_header"` + // This sets the Origin header value corsRegexString string `yaml:"cors_origin"` @@ -87,6 +91,7 @@ var ( func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.ResponseCompression, "api.response-compression-enabled", false, "Use GZIP compression for API responses. Some endpoints serve large YAML or JSON blobs which can benefit from compression.") f.Var(&cfg.HTTPRequestHeadersToLog, "api.http-request-headers-to-log", "Which HTTP Request headers to add to logs") + f.StringVar(&cfg.RequestIdHeader, "api.request-id-header", "", "HTTP header that can be used as request id") f.BoolVar(&cfg.buildInfoEnabled, "api.build-info-enabled", false, "If enabled, build Info API will be served by query frontend or querier.") f.StringVar(&cfg.QuerierDefaultCodec, "api.querier-default-codec", "json", "Choose default codec for querier response serialization. Supports 'json' and 'protobuf'.") cfg.RegisterFlagsWithPrefix("", f) @@ -169,8 +174,9 @@ func New(cfg Config, serverCfg server.Config, s *server.Server, logger log.Logge if cfg.HTTPAuthMiddleware == nil { api.AuthMiddleware = middleware.AuthenticateUser } - if len(cfg.HTTPRequestHeadersToLog) > 0 { - api.HTTPHeaderMiddleware = &HTTPHeaderMiddleware{TargetHeaders: cfg.HTTPRequestHeadersToLog} + api.HTTPHeaderMiddleware = &HTTPHeaderMiddleware{ + TargetHeaders: cfg.HTTPRequestHeadersToLog, + RequestIdHeader: cfg.RequestIdHeader, } return api, nil @@ -256,7 +262,7 @@ func (a *API) RegisterAlertmanager(am *alertmanager.MultitenantAlertmanager, tar } // RegisterAPI registers the standard endpoints associated with a running Cortex. 
-func (a *API) RegisterAPI(httpPathPrefix string, actualCfg interface{}, defaultCfg interface{}) { +func (a *API) RegisterAPI(httpPathPrefix string, actualCfg any, defaultCfg any) { a.indexPage.AddLink(SectionAdminEndpoints, "/config", "Current Config (including the default values)") a.indexPage.AddLink(SectionAdminEndpoints, "/config?mode=diff", "Current Config (show only values that differ from the defaults)") @@ -277,7 +283,7 @@ func (a *API) RegisterRuntimeConfig(runtimeConfigHandler http.HandlerFunc) { func (a *API) RegisterDistributor(d *distributor.Distributor, pushConfig distributor.Config, overrides *validation.Overrides) { distributorpb.RegisterDistributorServer(a.server.GRPC, d) - a.RegisterRoute("/api/v1/push", push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, a.cfg.wrapDistributorPush(d)), true, "POST") + a.RegisterRoute("/api/v1/push", push.Handler(pushConfig.RemoteWriteV2Enabled, pushConfig.MaxRecvMsgSize, a.sourceIPs, a.cfg.wrapDistributorPush(d)), true, "POST") a.RegisterRoute("/api/v1/otlp/v1/metrics", push.OTLPHandler(pushConfig.OTLPMaxRecvMsgSize, overrides, pushConfig.OTLPConfig, a.sourceIPs, a.cfg.wrapDistributorPush(d)), true, "POST") a.indexPage.AddLink(SectionAdminEndpoints, "/distributor/ring", "Distributor Ring Status") @@ -289,7 +295,7 @@ func (a *API) RegisterDistributor(d *distributor.Distributor, pushConfig distrib a.RegisterRoute("/distributor/ha_tracker", d.HATracker, false, "GET") // Legacy Routes - a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/push"), push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, a.cfg.wrapDistributorPush(d)), true, "POST") + a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/push"), push.Handler(pushConfig.RemoteWriteV2Enabled, pushConfig.MaxRecvMsgSize, a.sourceIPs, a.cfg.wrapDistributorPush(d)), true, "POST") a.RegisterRoute("/all_user_stats", http.HandlerFunc(d.AllUserStatsHandler), false, "GET") a.RegisterRoute("/ha-tracker", d.HATracker, false, "GET") } @@ -322,12 +328,12 @@ func (a *API) RegisterIngester(i Ingester, pushConfig distributor.Config) { a.RegisterRoute("/ingester/renewTokens", http.HandlerFunc(i.RenewTokenHandler), false, "GET", "POST") a.RegisterRoute("/ingester/all_user_stats", http.HandlerFunc(i.AllUserStatsHandler), false, "GET") a.RegisterRoute("/ingester/mode", http.HandlerFunc(i.ModeHandler), false, "GET", "POST") - a.RegisterRoute("/ingester/push", push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, i.Push), true, "POST") // For testing and debugging. + a.RegisterRoute("/ingester/push", push.Handler(pushConfig.RemoteWriteV2Enabled, pushConfig.MaxRecvMsgSize, a.sourceIPs, i.Push), true, "POST") // For testing and debugging. // Legacy Routes a.RegisterRoute("/flush", http.HandlerFunc(i.FlushHandler), false, "GET", "POST") a.RegisterRoute("/shutdown", http.HandlerFunc(i.ShutdownHandler), false, "GET", "POST") - a.RegisterRoute("/push", push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, i.Push), true, "POST") // For testing and debugging. + a.RegisterRoute("/push", push.Handler(pushConfig.RemoteWriteV2Enabled, pushConfig.MaxRecvMsgSize, a.sourceIPs, i.Push), true, "POST") // For testing and debugging. 
}

 func (a *API) RegisterTenantDeletion(api *purger.TenantDeletionAPI) {
@@ -431,6 +437,7 @@ func (a *API) RegisterQueryAPI(handler http.Handler) {
 	a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/query_range"), hf, true, "GET", "POST")
 	a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/query_exemplars"), hf, true, "GET", "POST")
 	a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/format_query"), hf, true, "GET", "POST")
+	a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/parse_query"), hf, true, "GET", "POST")
 	a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/labels"), hf, true, "GET", "POST")
 	a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/label/{name}/values"), hf, true, "GET")
 	a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/series"), hf, true, "GET", "POST", "DELETE")
@@ -442,6 +449,7 @@ func (a *API) RegisterQueryAPI(handler http.Handler) {
 	a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/api/v1/query_range"), hf, true, "GET", "POST")
 	a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/api/v1/query_exemplars"), hf, true, "GET", "POST")
 	a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/api/v1/format_query"), hf, true, "GET", "POST")
+	a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/api/v1/parse_query"), hf, true, "GET", "POST")
 	a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/api/v1/labels"), hf, true, "GET", "POST")
 	a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/api/v1/label/{name}/values"), hf, true, "GET")
 	a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/api/v1/series"), hf, true, "GET", "POST", "DELETE")
diff --git a/pkg/api/api_test.go b/pkg/api/api_test.go
index c25ca27234..f864199ee3 100644
--- a/pkg/api/api_test.go
+++ b/pkg/api/api_test.go
@@ -21,7 +21,7 @@ const (

 type FakeLogger struct{}

-func (fl *FakeLogger) Log(keyvals ...interface{}) error {
+func (fl *FakeLogger) Log(keyvals ...any) error {
 	return nil
 }

@@ -89,6 +89,7 @@ func TestNewApiWithHeaderLogging(t *testing.T) {

 }

+// HTTPHeaderMiddleware should be added even when no headers to log are configured, because it also handles request ID injection.
func TestNewApiWithoutHeaderLogging(t *testing.T) { cfg := Config{ HTTPRequestHeadersToLog: []string{}, @@ -102,7 +103,8 @@ func TestNewApiWithoutHeaderLogging(t *testing.T) { api, err := New(cfg, serverCfg, server, &FakeLogger{}) require.NoError(t, err) - require.Nil(t, api.HTTPHeaderMiddleware) + require.NotNil(t, api.HTTPHeaderMiddleware) + require.Empty(t, api.HTTPHeaderMiddleware.TargetHeaders) } @@ -185,12 +187,11 @@ func Benchmark_Compression(b *testing.B) { req.Header.Set(acceptEncodingHeader, "gzip") b.ReportAllocs() - b.ResetTimer() // Reusing the array to read the body and avoid allocation on the test encRespBody := make([]byte, len(respBody)) - for i := 0; i < b.N; i++ { + for b.Loop() { resp, err := client.Do(req) require.NoError(b, err) diff --git a/pkg/api/handlers.go b/pkg/api/handlers.go index 9bcc6a6906..2b30e8aa58 100644 --- a/pkg/api/handlers.go +++ b/pkg/api/handlers.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "html/template" + "maps" "net/http" "path" "sync" @@ -19,13 +20,13 @@ import ( "github.com/prometheus/common/route" "github.com/prometheus/common/version" "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" v1 "github.com/prometheus/prometheus/web/api/v1" "github.com/weaveworks/common/instrument" "github.com/weaveworks/common/middleware" "github.com/cortexproject/cortex/pkg/api/queryapi" + "github.com/cortexproject/cortex/pkg/engine" "github.com/cortexproject/cortex/pkg/querier" "github.com/cortexproject/cortex/pkg/querier/codec" "github.com/cortexproject/cortex/pkg/querier/stats" @@ -70,9 +71,7 @@ func (pc *IndexPageContent) GetContent() map[string]map[string]string { result := map[string]map[string]string{} for k, v := range pc.content { sm := map[string]string{} - for smK, smV := range v { - sm[smK] = smV - } + maps.Copy(sm, v) result[k] = sm } return result @@ -100,7 +99,7 @@ var indexPageTemplate = ` func indexHandler(httpPathPrefix string, content *IndexPageContent) http.HandlerFunc { templ := template.New("main") - templ.Funcs(map[string]interface{}{ + templ.Funcs(map[string]any{ "AddPathPrefix": func(link string) string { return path.Join(httpPathPrefix, link) }, @@ -115,16 +114,16 @@ func indexHandler(httpPathPrefix string, content *IndexPageContent) http.Handler } } -func (cfg *Config) configHandler(actualCfg interface{}, defaultCfg interface{}) http.HandlerFunc { +func (cfg *Config) configHandler(actualCfg any, defaultCfg any) http.HandlerFunc { if cfg.CustomConfigHandler != nil { return cfg.CustomConfigHandler(actualCfg, defaultCfg) } return DefaultConfigHandler(actualCfg, defaultCfg) } -func DefaultConfigHandler(actualCfg interface{}, defaultCfg interface{}) http.HandlerFunc { +func DefaultConfigHandler(actualCfg any, defaultCfg any) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { - var output interface{} + var output any switch r.URL.Query().Get("mode") { case "diff": defaultCfgObj, err := util.YAMLMarshalUnmarshal(defaultCfg) @@ -161,9 +160,10 @@ func DefaultConfigHandler(actualCfg interface{}, defaultCfg interface{}) http.Ha // server to fulfill the Prometheus query API. 
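+// The querierCfg parameter carries querier settings, such as the lookback
+// delta, that are forwarded to the Prometheus v1 API constructor below.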
func NewQuerierHandler( cfg Config, + querierCfg querier.Config, queryable storage.SampleAndChunkQueryable, exemplarQueryable storage.ExemplarQueryable, - engine promql.QueryEngine, + engine engine.QueryEngine, metadataQuerier querier.MetadataQuerier, reg prometheus.Registerer, logger log.Logger, @@ -239,6 +239,9 @@ func NewQuerierHandler( false, false, false, + false, + querierCfg.LookbackDelta, + false, ) // Let's clear all codecs to create the instrumented ones api.ClearCodecs() @@ -291,6 +294,7 @@ func NewQuerierHandler( router.Path(path.Join(prefix, "/api/v1/query_range")).Methods("GET", "POST").Handler(queryAPI.Wrap(queryAPI.RangeQueryHandler)) router.Path(path.Join(prefix, "/api/v1/query_exemplars")).Methods("GET", "POST").Handler(promRouter) router.Path(path.Join(prefix, "/api/v1/format_query")).Methods("GET", "POST").Handler(promRouter) + router.Path(path.Join(prefix, "/api/v1/parse_query")).Methods("GET", "POST").Handler(promRouter) router.Path(path.Join(prefix, "/api/v1/labels")).Methods("GET", "POST").Handler(promRouter) router.Path(path.Join(prefix, "/api/v1/label/{name}/values")).Methods("GET").Handler(promRouter) router.Path(path.Join(prefix, "/api/v1/series")).Methods("GET", "POST", "DELETE").Handler(promRouter) @@ -305,6 +309,7 @@ func NewQuerierHandler( router.Path(path.Join(legacyPrefix, "/api/v1/query_range")).Methods("GET", "POST").Handler(queryAPI.Wrap(queryAPI.RangeQueryHandler)) router.Path(path.Join(legacyPrefix, "/api/v1/query_exemplars")).Methods("GET", "POST").Handler(legacyPromRouter) router.Path(path.Join(legacyPrefix, "/api/v1/format_query")).Methods("GET", "POST").Handler(legacyPromRouter) + router.Path(path.Join(legacyPrefix, "/api/v1/parse_query")).Methods("GET", "POST").Handler(legacyPromRouter) router.Path(path.Join(legacyPrefix, "/api/v1/labels")).Methods("GET", "POST").Handler(legacyPromRouter) router.Path(path.Join(legacyPrefix, "/api/v1/label/{name}/values")).Methods("GET").Handler(legacyPromRouter) router.Path(path.Join(legacyPrefix, "/api/v1/series")).Methods("GET", "POST", "DELETE").Handler(legacyPromRouter) diff --git a/pkg/api/handlers_test.go b/pkg/api/handlers_test.go index 32e84d70a9..cf3b7ee1a7 100644 --- a/pkg/api/handlers_test.go +++ b/pkg/api/handlers_test.go @@ -14,6 +14,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/weaveworks/common/user" + + "github.com/cortexproject/cortex/pkg/querier" ) func TestIndexHandlerPrefix(t *testing.T) { @@ -90,7 +92,7 @@ func TestConfigDiffHandler(t *testing.T) { name string expectedStatusCode int expectedBody string - actualConfig func() interface{} + actualConfig func() any }{ { name: "no config parameters overridden", @@ -99,7 +101,7 @@ func TestConfigDiffHandler(t *testing.T) { }, { name: "slice changed", - actualConfig: func() interface{} { + actualConfig: func() any { c := newDefaultDiffConfigMock() c.MySlice = append(c.MySlice, "value3") return c @@ -112,7 +114,7 @@ func TestConfigDiffHandler(t *testing.T) { }, { name: "string in nested struct changed", - actualConfig: func() interface{} { + actualConfig: func() any { c := newDefaultDiffConfigMock() c.MyNestedStruct.MyString = "string2" return c @@ -123,7 +125,7 @@ func TestConfigDiffHandler(t *testing.T) { }, { name: "bool in nested struct changed", - actualConfig: func() interface{} { + actualConfig: func() any { c := newDefaultDiffConfigMock() c.MyNestedStruct.MyBool = true return c @@ -134,7 +136,7 @@ func TestConfigDiffHandler(t *testing.T) { }, { name: "test invalid input", - actualConfig: 
func() interface{} {
+			actualConfig: func() any {
 				c := "x"
 				return &c
 			},
@@ -146,7 +148,7 @@
 			defaultCfg := newDefaultDiffConfigMock()
 			t.Run(tc.name, func(t *testing.T) {
-				var actualCfg interface{}
+				var actualCfg any
 				if tc.actualConfig != nil {
 					actualCfg = tc.actualConfig()
 				} else {
@@ -171,7 +173,7 @@
 func TestConfigOverrideHandler(t *testing.T) {
 	cfg := &Config{
-		CustomConfigHandler: func(_ interface{}, _ interface{}) http.HandlerFunc {
+		CustomConfigHandler: func(_ any, _ any) http.HandlerFunc {
 			return func(w http.ResponseWriter, r *http.Request) {
 				_, err := w.Write([]byte("config"))
 				assert.NoError(t, err)
@@ -229,10 +231,11 @@ func TestBuildInfoAPI(t *testing.T) {
 	} {
 		t.Run(tc.name, func(t *testing.T) {
 			cfg := Config{buildInfoEnabled: true}
+			querierConfig := querier.Config{}
 			version.Version = tc.version
 			version.Branch = tc.branch
 			version.Revision = tc.revision
-			handler := NewQuerierHandler(cfg, nil, nil, nil, nil, nil, &FakeLogger{})
+			handler := NewQuerierHandler(cfg, querierConfig, nil, nil, nil, nil, nil, &FakeLogger{})
 			writer := httptest.NewRecorder()
 			req := httptest.NewRequest("GET", "/api/v1/status/buildinfo", nil)
 			req = req.WithContext(user.InjectOrgID(req.Context(), "test"))
diff --git a/pkg/api/middlewares.go b/pkg/api/middlewares.go
index 8ddefaa2c6..cda2076a68 100644
--- a/pkg/api/middlewares.go
+++ b/pkg/api/middlewares.go
@@ -1,40 +1,52 @@
 package api

 import (
-	"context"
 	"net/http"

-	util_log "github.com/cortexproject/cortex/pkg/util/log"
+	"github.com/google/uuid"
+
+	"github.com/cortexproject/cortex/pkg/util/requestmeta"
 )

 // HTTPHeaderMiddleware adds specified HTTPHeaders to the request context
 type HTTPHeaderMiddleware struct {
-	TargetHeaders []string
+	TargetHeaders   []string
+	RequestIdHeader string
 }

-// InjectTargetHeadersIntoHTTPRequest injects specified HTTPHeaders into the request context
-func (h HTTPHeaderMiddleware) InjectTargetHeadersIntoHTTPRequest(r *http.Request) context.Context {
-	headerMap := make(map[string]string)
+// injectRequestContext injects request-related metadata into the request context
+func (h HTTPHeaderMiddleware) injectRequestContext(r *http.Request) *http.Request {
+	requestContextMap := make(map[string]string)

-	// Check to make sure that Headers have not already been injected
-	checkMapInContext := util_log.HeaderMapFromContext(r.Context())
+	// Check to make sure that the request metadata has not already been injected
+	checkMapInContext := requestmeta.MapFromContext(r.Context())
 	if checkMapInContext != nil {
-		return r.Context()
+		return r
 	}

 	for _, target := range h.TargetHeaders {
 		contents := r.Header.Get(target)
 		if contents != "" {
-			headerMap[target] = contents
+			requestContextMap[target] = contents
 		}
 	}
-	return util_log.ContextWithHeaderMap(r.Context(), headerMap)
+	requestContextMap[requestmeta.LoggingHeadersKey] = requestmeta.LoggingHeaderKeysToString(h.TargetHeaders)
+
+	reqId := r.Header.Get(h.RequestIdHeader)
+	if reqId == "" {
+		reqId = uuid.NewString()
+	}
+	requestContextMap[requestmeta.RequestIdKey] = reqId
+	requestContextMap[requestmeta.RequestSourceKey] = requestmeta.SourceAPI
+
+	ctx := requestmeta.ContextWithRequestMetadataMap(r.Context(), requestContextMap)
+	return r.WithContext(ctx)
 }

 // Wrap implements Middleware
 func (h HTTPHeaderMiddleware) Wrap(next http.Handler) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		ctx := h.InjectTargetHeadersIntoHTTPRequest(r)
-		next.ServeHTTP(w, 
r.WithContext(ctx)) + r = h.injectRequestContext(r) + next.ServeHTTP(w, r) }) } diff --git a/pkg/api/middlewares_test.go b/pkg/api/middlewares_test.go index dbf8719ad4..691d3b2358 100644 --- a/pkg/api/middlewares_test.go +++ b/pkg/api/middlewares_test.go @@ -7,12 +7,11 @@ import ( "github.com/stretchr/testify/require" - util_log "github.com/cortexproject/cortex/pkg/util/log" + "github.com/cortexproject/cortex/pkg/util/requestmeta" ) -var HTTPTestMiddleware = HTTPHeaderMiddleware{TargetHeaders: []string{"TestHeader1", "TestHeader2", "Test3"}} - func TestHeaderInjection(t *testing.T) { + middleware := HTTPHeaderMiddleware{TargetHeaders: []string{"TestHeader1", "TestHeader2", "Test3"}} ctx := context.Background() h := http.Header{} contentsMap := make(map[string]string) @@ -32,12 +31,12 @@ func TestHeaderInjection(t *testing.T) { } req = req.WithContext(ctx) - ctx = HTTPTestMiddleware.InjectTargetHeadersIntoHTTPRequest(req) + req = middleware.injectRequestContext(req) - headerMap := util_log.HeaderMapFromContext(ctx) + headerMap := requestmeta.MapFromContext(req.Context()) require.NotNil(t, headerMap) - for _, header := range HTTPTestMiddleware.TargetHeaders { + for _, header := range middleware.TargetHeaders { require.Equal(t, contentsMap[header], headerMap[header]) } for header, contents := range contentsMap { @@ -46,6 +45,7 @@ func TestHeaderInjection(t *testing.T) { } func TestExistingHeaderInContextIsNotOverridden(t *testing.T) { + middleware := HTTPHeaderMiddleware{TargetHeaders: []string{"TestHeader1", "TestHeader2", "Test3"}} ctx := context.Background() h := http.Header{} @@ -58,7 +58,7 @@ func TestExistingHeaderInContextIsNotOverridden(t *testing.T) { h.Add("TestHeader2", "Fail2") h.Add("Test3", "Fail3") - ctx = util_log.ContextWithHeaderMap(ctx, contentsMap) + ctx = requestmeta.ContextWithRequestMetadataMap(ctx, contentsMap) req := &http.Request{ Method: "GET", RequestURI: "/HTTPHeaderTest", @@ -67,8 +67,77 @@ func TestExistingHeaderInContextIsNotOverridden(t *testing.T) { } req = req.WithContext(ctx) - ctx = HTTPTestMiddleware.InjectTargetHeadersIntoHTTPRequest(req) + req = middleware.injectRequestContext(req) + + require.Equal(t, contentsMap, requestmeta.MapFromContext(req.Context())) + +} + +func TestRequestIdInjection(t *testing.T) { + middleware := HTTPHeaderMiddleware{ + RequestIdHeader: "X-Request-ID", + } + + req := &http.Request{ + Method: "GET", + RequestURI: "/test", + Body: http.NoBody, + Header: http.Header{}, + } + req = req.WithContext(context.Background()) + req = middleware.injectRequestContext(req) + + requestID := requestmeta.RequestIdFromContext(req.Context()) + require.NotEmpty(t, requestID, "Request ID should be generated if not provided") +} + +func TestRequestIdFromHeaderIsUsed(t *testing.T) { + const providedID = "my-test-id-123" + + middleware := HTTPHeaderMiddleware{ + RequestIdHeader: "X-Request-ID", + } + + h := http.Header{} + h.Add("X-Request-ID", providedID) - require.Equal(t, contentsMap, util_log.HeaderMapFromContext(ctx)) + req := &http.Request{ + Method: "GET", + RequestURI: "/test", + Body: http.NoBody, + Header: h, + } + req = req.WithContext(context.Background()) + req = middleware.injectRequestContext(req) + + requestID := requestmeta.RequestIdFromContext(req.Context()) + require.Equal(t, providedID, requestID, "Request ID from header should be used") +} + +func TestTargetHeaderAndRequestIdHeaderOverlap(t *testing.T) { + const headerKey = "X-Request-ID" + const providedID = "overlap-id-456" + + middleware := HTTPHeaderMiddleware{ + 
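+		// The request-id header deliberately appears in TargetHeaders as well,
+		// exercising the overlap between logged headers and the request id.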
TargetHeaders:   []string{headerKey, "Other-Header"},
+		RequestIdHeader: headerKey,
+	}
+
+	h := http.Header{}
+	h.Add(headerKey, providedID)
+	h.Add("Other-Header", "some-value")
+
+	req := &http.Request{
+		Method:     "GET",
+		RequestURI: "/test",
+		Body:       http.NoBody,
+		Header:     h,
+	}
+	req = req.WithContext(context.Background())
+	req = middleware.injectRequestContext(req)
+	ctxMap := requestmeta.MapFromContext(req.Context())
+	requestID := requestmeta.RequestIdFromContext(req.Context())
+	require.Equal(t, providedID, ctxMap[headerKey], "Header value should be correctly stored")
+	require.Equal(t, providedID, requestID, "Request ID should come from the overlapping header")
+}
diff --git a/pkg/api/queryapi/compression.go b/pkg/api/queryapi/compression.go
new file mode 100644
index 0000000000..b7c4f0ce00
--- /dev/null
+++ b/pkg/api/queryapi/compression.go
@@ -0,0 +1,90 @@
+package queryapi
+
+import (
+	"io"
+	"net/http"
+	"strings"
+
+	"github.com/klauspost/compress/gzip"
+	"github.com/klauspost/compress/snappy"
+	"github.com/klauspost/compress/zlib"
+	"github.com/klauspost/compress/zstd"
+)
+
+const (
+	acceptEncodingHeader  = "Accept-Encoding"
+	contentEncodingHeader = "Content-Encoding"
+	gzipEncoding          = "gzip"
+	deflateEncoding       = "deflate"
+	snappyEncoding        = "snappy"
+	zstdEncoding          = "zstd"
+)
+
+// compressedResponseWriter wraps an http.ResponseWriter and compresses the
+// response body with the encoding negotiated from the client's
+// Accept-Encoding header.
+type compressedResponseWriter struct {
+	http.ResponseWriter
+	writer io.Writer
+}
+
+// Write writes HTTP response content data through the compressing writer.
+func (c *compressedResponseWriter) Write(p []byte) (int, error) {
+	return c.writer.Write(p)
+}
+
+// Close flushes all buffered data and then closes the underlying compressing
+// writer, if it supports closing.
+func (c *compressedResponseWriter) Close() {
+	if zstdWriter, ok := c.writer.(*zstd.Encoder); ok {
+		zstdWriter.Flush()
+	}
+	if snappyWriter, ok := c.writer.(*snappy.Writer); ok {
+		snappyWriter.Flush()
+	}
+	if zlibWriter, ok := c.writer.(*zlib.Writer); ok {
+		zlibWriter.Flush()
+	}
+	if gzipWriter, ok := c.writer.(*gzip.Writer); ok {
+		gzipWriter.Flush()
+	}
+	if closer, ok := c.writer.(io.Closer); ok {
+		defer closer.Close()
+	}
+}
+
+// newCompressedResponseWriter constructs a compressedResponseWriter based on
+// the client's Accept-Encoding request header.
+func newCompressedResponseWriter(writer http.ResponseWriter, req *http.Request) *compressedResponseWriter {
+	encodings := strings.SplitSeq(req.Header.Get(acceptEncodingHeader), ",")
+	for encoding := range encodings {
+		switch strings.TrimSpace(encoding) {
+		case zstdEncoding:
+			encoder, err := zstd.NewWriter(writer)
+			if err == nil {
+				writer.Header().Set(contentEncodingHeader, zstdEncoding)
+				return &compressedResponseWriter{ResponseWriter: writer, writer: encoder}
+			}
+		case snappyEncoding:
+			writer.Header().Set(contentEncodingHeader, snappyEncoding)
+			return &compressedResponseWriter{ResponseWriter: writer, writer: snappy.NewBufferedWriter(writer)}
+		case gzipEncoding:
+			writer.Header().Set(contentEncodingHeader, gzipEncoding)
+			return &compressedResponseWriter{ResponseWriter: writer, writer: gzip.NewWriter(writer)}
+		case deflateEncoding:
+			writer.Header().Set(contentEncodingHeader, deflateEncoding)
+			return &compressedResponseWriter{ResponseWriter: writer, writer: zlib.NewWriter(writer)}
+		}
+	}
+	return &compressedResponseWriter{ResponseWriter: writer, writer: writer}
+}
+
+// CompressionHandler is a wrapper around http.Handler which adds suitable
+// response compression based on the client's Accept-Encoding headers.
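+// Supported encodings are zstd, snappy, gzip and deflate; the first
+// recognized entry in the client's Accept-Encoding list wins, and the
+// response is written uncompressed when none matches.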
+type CompressionHandler struct { + Handler http.Handler +} + +// ServeHTTP adds compression to the original http.Handler's ServeHTTP() method. +func (c CompressionHandler) ServeHTTP(writer http.ResponseWriter, req *http.Request) { + compWriter := newCompressedResponseWriter(writer, req) + c.Handler.ServeHTTP(compWriter, req) + compWriter.Close() +} diff --git a/pkg/api/queryapi/compression_test.go b/pkg/api/queryapi/compression_test.go new file mode 100644 index 0000000000..ce949b63ee --- /dev/null +++ b/pkg/api/queryapi/compression_test.go @@ -0,0 +1,159 @@ +package queryapi + +import ( + "bytes" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/klauspost/compress/gzip" + "github.com/klauspost/compress/snappy" + "github.com/klauspost/compress/zlib" + "github.com/klauspost/compress/zstd" + "github.com/stretchr/testify/require" +) + +func decompress(t *testing.T, encoding string, b []byte) []byte { + t.Helper() + + switch encoding { + case gzipEncoding: + r, err := gzip.NewReader(bytes.NewReader(b)) + require.NoError(t, err) + defer r.Close() + data, err := io.ReadAll(r) + require.NoError(t, err) + return data + case deflateEncoding: + r, err := zlib.NewReader(bytes.NewReader(b)) + require.NoError(t, err) + defer r.Close() + data, err := io.ReadAll(r) + require.NoError(t, err) + return data + case snappyEncoding: + data, err := io.ReadAll(snappy.NewReader(bytes.NewReader(b))) + require.NoError(t, err) + return data + case zstdEncoding: + r, err := zstd.NewReader(bytes.NewReader(b)) + require.NoError(t, err) + defer r.Close() + data, err := io.ReadAll(r) + require.NoError(t, err) + return data + default: + return b + } +} + +func TestNewCompressedResponseWriter_SupportedEncodings(t *testing.T) { + for _, tc := range []string{gzipEncoding, deflateEncoding, snappyEncoding, zstdEncoding} { + t.Run(tc, func(t *testing.T) { + rec := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/", nil) + req.Header.Set(acceptEncodingHeader, tc) + + cw := newCompressedResponseWriter(rec, req) + payload := []byte("hello world") + _, err := cw.Write(payload) + require.NoError(t, err) + cw.Close() + + require.Equal(t, tc, rec.Header().Get(contentEncodingHeader)) + + decompressed := decompress(t, tc, rec.Body.Bytes()) + require.Equal(t, payload, decompressed) + + switch tc { + case gzipEncoding: + _, ok := cw.writer.(*gzip.Writer) + require.True(t, ok) + case deflateEncoding: + _, ok := cw.writer.(*zlib.Writer) + require.True(t, ok) + case snappyEncoding: + _, ok := cw.writer.(*snappy.Writer) + require.True(t, ok) + case zstdEncoding: + _, ok := cw.writer.(*zstd.Encoder) + require.True(t, ok) + } + }) + } +} + +func TestNewCompressedResponseWriter_UnsupportedEncoding(t *testing.T) { + for _, tc := range []string{"", "br", "unknown"} { + t.Run(tc, func(t *testing.T) { + rec := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/", nil) + if tc != "" { + req.Header.Set(acceptEncodingHeader, tc) + } + + cw := newCompressedResponseWriter(rec, req) + payload := []byte("data") + _, err := cw.Write(payload) + require.NoError(t, err) + cw.Close() + + require.Empty(t, rec.Header().Get(contentEncodingHeader)) + require.Equal(t, payload, rec.Body.Bytes()) + require.Same(t, rec, cw.writer) + }) + } +} + +func TestNewCompressedResponseWriter_MultipleEncodings(t *testing.T) { + tests := []struct { + header string + expectEnc string + expectType any + }{ + {"snappy, gzip", snappyEncoding, &snappy.Writer{}}, + {"unknown, gzip", gzipEncoding, &gzip.Writer{}}, + } + + for 
_, tc := range tests { + t.Run(tc.header, func(t *testing.T) { + rec := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/", nil) + req.Header.Set(acceptEncodingHeader, tc.header) + + cw := newCompressedResponseWriter(rec, req) + _, err := cw.Write([]byte("payload")) + require.NoError(t, err) + cw.Close() + + require.Equal(t, tc.expectEnc, rec.Header().Get(contentEncodingHeader)) + decompressed := decompress(t, tc.expectEnc, rec.Body.Bytes()) + require.Equal(t, []byte("payload"), decompressed) + + switch tc.expectEnc { + case gzipEncoding: + require.IsType(t, &gzip.Writer{}, cw.writer) + case snappyEncoding: + require.IsType(t, &snappy.Writer{}, cw.writer) + } + }) + } +} + +func TestCompressionHandler_ServeHTTP(t *testing.T) { + handler := CompressionHandler{Handler: http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + _, err := w.Write([]byte("hello")) + require.NoError(t, err) + })} + + rec := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/", nil) + req.Header.Set(acceptEncodingHeader, gzipEncoding) + + handler.ServeHTTP(rec, req) + + require.Equal(t, gzipEncoding, rec.Header().Get(contentEncodingHeader)) + decompressed := decompress(t, gzipEncoding, rec.Body.Bytes()) + require.Equal(t, []byte("hello"), decompressed) +} diff --git a/pkg/api/queryapi/query_api.go b/pkg/api/queryapi/query_api.go index e3793ef5be..83eed69ec8 100644 --- a/pkg/api/queryapi/query_api.go +++ b/pkg/api/queryapi/query_api.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/http" + "strconv" "time" "github.com/go-kit/log" @@ -17,6 +18,7 @@ import ( v1 "github.com/prometheus/prometheus/web/api/v1" "github.com/weaveworks/common/httpgrpc" + "github.com/cortexproject/cortex/pkg/distributed_execution" "github.com/cortexproject/cortex/pkg/engine" "github.com/cortexproject/cortex/pkg/querier" "github.com/cortexproject/cortex/pkg/util" @@ -25,7 +27,7 @@ import ( type QueryAPI struct { queryable storage.SampleAndChunkQueryable - queryEngine promql.QueryEngine + queryEngine engine.QueryEngine now func() time.Time statsRenderer v1.StatsRenderer logger log.Logger @@ -34,7 +36,7 @@ type QueryAPI struct { } func NewQueryAPI( - qe promql.QueryEngine, + qe engine.QueryEngine, q storage.SampleAndChunkQueryable, statsRenderer v1.StatsRenderer, logger log.Logger, @@ -100,10 +102,29 @@ func (q *QueryAPI) RangeQueryHandler(r *http.Request) (result apiFuncResult) { ctx = engine.AddEngineTypeToContext(ctx, r) ctx = querier.AddBlockStoreTypeToContext(ctx, r.Header.Get(querier.BlockStoreTypeHeader)) - qry, err := q.queryEngine.NewRangeQuery(ctx, q.queryable, opts, r.FormValue("query"), convertMsToTime(start), convertMsToTime(end), convertMsToDuration(step)) - if err != nil { - return invalidParamError(httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()), "query") + + var qry promql.Query + startTime := convertMsToTime(start) + endTime := convertMsToTime(end) + stepDuration := convertMsToDuration(step) + + byteLP := []byte(r.PostFormValue("plan")) + if len(byteLP) != 0 { + logicalPlan, err := distributed_execution.Unmarshal(byteLP) + if err != nil { + return apiFuncResult{nil, &apiError{errorInternal, fmt.Errorf("invalid logical plan: %v", err)}, nil, nil} + } + qry, err = q.queryEngine.MakeRangeQueryFromPlan(ctx, q.queryable, opts, logicalPlan, startTime, endTime, stepDuration, r.FormValue("query")) + if err != nil { + return apiFuncResult{nil, &apiError{errorInternal, fmt.Errorf("failed to create range query from logical plan: %v", err)}, nil, nil} + } + } else { // if there is 
no logical plan in the request, fall back to the query string
+		qry, err = q.queryEngine.NewRangeQuery(ctx, q.queryable, opts, r.FormValue("query"), startTime, endTime, stepDuration)
+		if err != nil {
+			return invalidParamError(httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()), "query")
+		}
 	}
+
 	// From now on, we must only return with a finalizer in the result (to
 	// be called by the caller) or call qry.Close ourselves (which is
 	// required in the case of a panic).
@@ -156,9 +177,25 @@ func (q *QueryAPI) InstantQueryHandler(r *http.Request) (result apiFuncResult) {
 	ctx = engine.AddEngineTypeToContext(ctx, r)
 	ctx = querier.AddBlockStoreTypeToContext(ctx, r.Header.Get(querier.BlockStoreTypeHeader))
-	qry, err := q.queryEngine.NewInstantQuery(ctx, q.queryable, opts, r.FormValue("query"), convertMsToTime(ts))
-	if err != nil {
-		return invalidParamError(httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()), "query")
+
+	var qry promql.Query
+	tsTime := convertMsToTime(ts)
+
+	byteLP := []byte(r.PostFormValue("plan"))
+	if len(byteLP) != 0 {
+		logicalPlan, err := distributed_execution.Unmarshal(byteLP)
+		if err != nil {
+			return apiFuncResult{nil, &apiError{errorInternal, fmt.Errorf("invalid logical plan: %v", err)}, nil, nil}
+		}
+		qry, err = q.queryEngine.MakeInstantQueryFromPlan(ctx, q.queryable, opts, logicalPlan, tsTime, r.FormValue("query"))
+		if err != nil {
+			return apiFuncResult{nil, &apiError{errorInternal, fmt.Errorf("failed to create instant query from logical plan: %v", err)}, nil, nil}
+		}
+	} else { // if there is no logical plan in the request, fall back to the query string
+		qry, err = q.queryEngine.NewInstantQuery(ctx, q.queryable, opts, r.FormValue("query"), tsTime)
+		if err != nil {
+			return invalidParamError(httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()), "query")
+		}
 	}

 	// From now on, we must only return with a finalizer in the result (to
@@ -208,12 +245,12 @@ func (q *QueryAPI) Wrap(f apiFunc) http.HandlerFunc {
 			w.WriteHeader(http.StatusNoContent)
 		}

-	return httputil.CompressionHandler{
+	return CompressionHandler{
 		Handler: http.HandlerFunc(hf),
 	}.ServeHTTP
 }

-func (q *QueryAPI) respond(w http.ResponseWriter, req *http.Request, data interface{}, warnings annotations.Annotations, query string) {
+func (q *QueryAPI) respond(w http.ResponseWriter, req *http.Request, data any, warnings annotations.Annotations, query string) {
 	warn, info := warnings.AsStrings(query, 10, 10)

 	resp := &v1.Response{
@@ -237,6 +274,7 @@ func (q *QueryAPI) respond(w http.ResponseWriter, req *http.Request, data interf
 	}

 	w.Header().Set("Content-Type", codec.ContentType().String())
+	// Expose the uncompressed body size so clients can tell how large the
+	// response is before any Content-Encoding is applied.
+	w.Header().Set("X-Uncompressed-Length", strconv.Itoa(len(b)))
 	w.WriteHeader(http.StatusOK)
 	if n, err := w.Write(b); err != nil {
 		level.Error(q.logger).Log("error writing response", "url", req.URL, "bytesWritten", n, "err", err)
diff --git a/pkg/api/queryapi/query_api_test.go b/pkg/api/queryapi/query_api_test.go
index 028184a12b..2a0ce0cbc9 100644
--- a/pkg/api/queryapi/query_api_test.go
+++ b/pkg/api/queryapi/query_api_test.go
@@ -7,21 +7,28 @@ import (
 	"io"
 	"net/http"
 	"net/http/httptest"
+	"net/url"
+	"strings"
 	"testing"
 	"time"

 	"github.com/go-kit/log"
 	"github.com/gorilla/mux"
 	"github.com/grafana/regexp"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/promql"
+	"github.com/prometheus/prometheus/promql/parser"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/util/annotations"
 	v1 
"github.com/prometheus/prometheus/web/api/v1" "github.com/stretchr/testify/require" + "github.com/thanos-io/promql-engine/logicalplan" + "github.com/thanos-io/promql-engine/query" "github.com/weaveworks/common/user" + engine2 "github.com/cortexproject/cortex/pkg/engine" "github.com/cortexproject/cortex/pkg/querier" "github.com/cortexproject/cortex/pkg/querier/series" "github.com/cortexproject/cortex/pkg/querier/stats" @@ -64,10 +71,14 @@ func (mockQuerier) Close() error { } func Test_CustomAPI(t *testing.T) { - engine := promql.NewEngine(promql.EngineOpts{ - MaxSamples: 100, - Timeout: time.Second * 2, - }) + engine := engine2.New( + promql.EngineOpts{ + MaxSamples: 100, + Timeout: time.Second * 2, + }, + engine2.ThanosEngineConfig{Enabled: false}, + prometheus.NewRegistry()) + mockQueryable := &mockSampleAndChunkQueryable{ queryableFn: func(_, _ int64) (storage.Querier, error) { return mockQuerier{ @@ -175,10 +186,10 @@ func Test_CustomAPI(t *testing.T) { c := NewQueryAPI(engine, mockQueryable, querier.StatsRenderer, log.NewNopLogger(), []v1.Codec{v1.JSONCodec{}}, regexp.MustCompile(".*")) router := mux.NewRouter() - router.Path("/api/v1/query").Methods("GET").Handler(c.Wrap(c.InstantQueryHandler)) - router.Path("/api/v1/query_range").Methods("GET").Handler(c.Wrap(c.RangeQueryHandler)) + router.Path("/api/v1/query").Methods("POST").Handler(c.Wrap(c.InstantQueryHandler)) + router.Path("/api/v1/query_range").Methods("POST").Handler(c.Wrap(c.RangeQueryHandler)) - req := httptest.NewRequest(http.MethodGet, test.path, nil) + req := httptest.NewRequest(http.MethodPost, test.path, nil) ctx := context.Background() _, ctx = stats.ContextWithEmptyStats(ctx) req = req.WithContext(user.InjectOrgID(ctx, "user1")) @@ -209,10 +220,14 @@ func (m *mockCodec) Encode(_ *v1.Response) ([]byte, error) { } func Test_InvalidCodec(t *testing.T) { - engine := promql.NewEngine(promql.EngineOpts{ - MaxSamples: 100, - Timeout: time.Second * 2, - }) + engine := engine2.New( + promql.EngineOpts{ + MaxSamples: 100, + Timeout: time.Second * 2, + }, + engine2.ThanosEngineConfig{Enabled: false}, + prometheus.NewRegistry()) + mockQueryable := &mockSampleAndChunkQueryable{ queryableFn: func(_, _ int64) (storage.Querier, error) { return mockQuerier{ @@ -231,9 +246,9 @@ func Test_InvalidCodec(t *testing.T) { queryAPI := NewQueryAPI(engine, mockQueryable, querier.StatsRenderer, log.NewNopLogger(), []v1.Codec{&mockCodec{}}, regexp.MustCompile(".*")) router := mux.NewRouter() - router.Path("/api/v1/query").Methods("GET").Handler(queryAPI.Wrap(queryAPI.InstantQueryHandler)) + router.Path("/api/v1/query").Methods("POST").Handler(queryAPI.Wrap(queryAPI.InstantQueryHandler)) - req := httptest.NewRequest(http.MethodGet, "/api/v1/query?query=test", nil) + req := httptest.NewRequest(http.MethodPost, "/api/v1/query?query=test", nil) ctx := context.Background() _, ctx = stats.ContextWithEmptyStats(ctx) req = req.WithContext(user.InjectOrgID(ctx, "user1")) @@ -244,10 +259,14 @@ func Test_InvalidCodec(t *testing.T) { } func Test_CustomAPI_StatsRenderer(t *testing.T) { - engine := promql.NewEngine(promql.EngineOpts{ - MaxSamples: 100, - Timeout: time.Second * 2, - }) + engine := engine2.New( + promql.EngineOpts{ + MaxSamples: 100, + Timeout: time.Second * 2, + }, + engine2.ThanosEngineConfig{Enabled: false}, + prometheus.NewRegistry()) + mockQueryable := &mockSampleAndChunkQueryable{ queryableFn: func(_, _ int64) (storage.Querier, error) { return mockQuerier{ @@ -269,9 +288,9 @@ func Test_CustomAPI_StatsRenderer(t *testing.T) { queryAPI := 
NewQueryAPI(engine, mockQueryable, querier.StatsRenderer, log.NewNopLogger(), []v1.Codec{v1.JSONCodec{}}, regexp.MustCompile(".*"))
 	router := mux.NewRouter()
-	router.Path("/api/v1/query_range").Methods("GET").Handler(queryAPI.Wrap(queryAPI.RangeQueryHandler))
+	router.Path("/api/v1/query_range").Methods("POST").Handler(queryAPI.Wrap(queryAPI.RangeQueryHandler))

-	req := httptest.NewRequest(http.MethodGet, "/api/v1/query_range?end=1536673680&query=test&start=1536673665&step=5", nil)
+	req := httptest.NewRequest(http.MethodPost, "/api/v1/query_range?end=1536673680&query=test&start=1536673665&step=5", nil)
 	ctx := context.Background()
 	_, ctx = stats.ContextWithEmptyStats(ctx)
 	req = req.WithContext(user.InjectOrgID(ctx, "user1"))
@@ -285,3 +304,202 @@ func Test_CustomAPI_StatsRenderer(t *testing.T) {
 	require.Equal(t, uint64(4), queryStats.LoadPeakSamples())
 	require.Equal(t, uint64(4), queryStats.LoadScannedSamples())
 }
+
+func Test_Logicalplan_Requests(t *testing.T) {
+	engine := engine2.New(
+		promql.EngineOpts{
+			MaxSamples: 100,
+			Timeout:    time.Second * 2,
+		},
+		engine2.ThanosEngineConfig{Enabled: true},
+		prometheus.NewRegistry(),
+	)
+
+	mockMatrix := model.Matrix{
+		{
+			Metric: model.Metric{"__name__": "test", "foo": "bar"},
+			Values: []model.SamplePair{
+				{Timestamp: 1536673665000, Value: 0},
+				{Timestamp: 1536673670000, Value: 1},
+			},
+		},
+	}
+
+	mockQueryable := &mockSampleAndChunkQueryable{
+		queryableFn: func(_, _ int64) (storage.Querier, error) {
+			return mockQuerier{matrix: mockMatrix}, nil
+		},
+	}
+
+	tests := []struct {
+		name         string
+		path         string
+		start        int64
+		end          int64
+		stepDuration int64
+		requestBody  func(t *testing.T) []byte
+		expectedCode int
+		expectedBody string
+	}{
+		{
+			name:         "[Range Query] with valid logical plan and empty query string",
+			path:         "/api/v1/query_range?end=1536673680&query=&start=1536673665&step=5",
+			start:        1536673665,
+			end:          1536673680,
+			stepDuration: 5,
+			requestBody: func(t *testing.T) []byte {
+				return createTestLogicalPlan(t, 1536673665, 1536673680, 5)
+			},
+			expectedCode: http.StatusOK,
+			expectedBody: `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"__name__":"test","foo":"bar"},"values":[[1536673665,"0"],[1536673670,"1"],[1536673675,"1"],[1536673680,"1"]]}]}}`,
+		},
+		{
+			name:         "[Range Query] with corrupted logical plan", // returns an error at the unmarshal step
+			path:         "/api/v1/query_range?end=1536673680&query=test&start=1536673665&step=5",
+			start:        1536673665,
+			end:          1536673680,
+			stepDuration: 5,
+			requestBody: func(t *testing.T) []byte {
+				return append(createTestLogicalPlan(t, 1536673665, 1536673680, 5), []byte("random data")...)
+ }, + expectedCode: http.StatusInternalServerError, + expectedBody: `{"status":"error","errorType":"server_error","error":"invalid logical plan: invalid character 'r' after top-level value"}`, + }, + { + name: "[Range Query] with empty body and non-empty query string", // falls back to PromQL query execution + path: "/api/v1/query_range?end=1536673680&query=test&start=1536673665&step=5", + start: 1536673665, + end: 1536673680, + stepDuration: 5, + requestBody: func(t *testing.T) []byte { + return []byte{} + }, + expectedCode: http.StatusOK, + expectedBody: `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"__name__":"test","foo":"bar"},"values":[[1536673665,"0"],[1536673670,"1"],[1536673675,"1"],[1536673680,"1"]]}]}}`, + }, + { + name: "[Range Query] with empty body and empty query string", // falls back to PromQL query execution, but errors because of the empty query string + path: "/api/v1/query_range?end=1536673680&query=&start=1536673665&step=5", + start: 1536673665, + end: 1536673680, + stepDuration: 5, + requestBody: func(t *testing.T) []byte { + return []byte{} + }, + expectedCode: http.StatusBadRequest, + expectedBody: "{\"status\":\"error\",\"errorType\":\"bad_data\",\"error\":\"invalid parameter \\\"query\\\"; unknown position: parse error: no expression found in input\"}", + }, + { + name: "[Instant Query] with valid logical plan and non-empty query string", + path: "/api/v1/query?query=test&time=1536673670", + start: 1536673670, + end: 1536673670, + stepDuration: 0, + requestBody: func(t *testing.T) []byte { + return createTestLogicalPlan(t, 1536673670, 1536673670, 0) + }, + expectedCode: http.StatusOK, + expectedBody: `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"test","foo":"bar"},"value":[1536673670,"1"]}]}}`, + }, + { + name: "[Instant Query] with corrupted logical plan", + path: "/api/v1/query?query=test&time=1536673670", + start: 1536673670, + end: 1536673670, + stepDuration: 0, + requestBody: func(t *testing.T) []byte { + return append(createTestLogicalPlan(t, 1536673670, 1536673670, 0), []byte("random data")...)
+ }, + expectedCode: http.StatusInternalServerError, + expectedBody: `{"status":"error","errorType":"server_error","error":"invalid logical plan: invalid character 'r' after top-level value"}`, + }, + { + name: "[Instant Query] with empty body and non-empty query string", + path: "/api/v1/query?query=test&time=1536673670", + start: 1536673670, + end: 1536673670, + stepDuration: 0, + requestBody: func(t *testing.T) []byte { + return []byte{} + }, + expectedCode: http.StatusOK, + expectedBody: `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"test","foo":"bar"},"value":[1536673670,"1"]}]}}`, + }, + { + name: "[Instant Query] with empty body and empty query string", + path: "/api/v1/query?query=&time=1536673670", + start: 1536673670, + end: 1536673670, + stepDuration: 0, + requestBody: func(t *testing.T) []byte { + return []byte{} + }, + expectedCode: http.StatusBadRequest, + expectedBody: "{\"status\":\"error\",\"errorType\":\"bad_data\",\"error\":\"invalid parameter \\\"query\\\"; unknown position: parse error: no expression found in input\"}", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := NewQueryAPI(engine, mockQueryable, querier.StatsRenderer, log.NewNopLogger(), []v1.Codec{v1.JSONCodec{}}, regexp.MustCompile(".*")) + router := mux.NewRouter() + router.Path("/api/v1/query").Methods("POST").Handler(c.Wrap(c.InstantQueryHandler)) + router.Path("/api/v1/query_range").Methods("POST").Handler(c.Wrap(c.RangeQueryHandler)) + + req := createTestRequest(tt.path, tt.requestBody(t)) + rec := httptest.NewRecorder() + router.ServeHTTP(rec, req) + + require.Equal(t, tt.expectedCode, rec.Code) + body, err := io.ReadAll(rec.Body) + require.NoError(t, err) + require.Equal(t, tt.expectedBody, string(body)) + }) + } +} + +func createTestRequest(path string, planBytes []byte) *http.Request { + form := url.Values{} + form.Set("plan", string(planBytes)) + req := httptest.NewRequest(http.MethodPost, path, io.NopCloser(strings.NewReader(form.Encode()))) + + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + ctx := context.Background() + _, ctx = stats.ContextWithEmptyStats(ctx) + return req.WithContext(user.InjectOrgID(ctx, "user1")) +} + +func createTestLogicalPlan(t *testing.T, start, end int64, stepDuration int64) []byte { + startTime, endTime := convertMsToTime(start), convertMsToTime(end) + step := convertMsToDuration(stepDuration) + + qOpts := query.Options{ + Start: startTime, + End: startTime, + Step: 0, + StepsBatch: 10, + LookbackDelta: 0, + EnablePerStepStats: false, + } + + if step != 0 { + qOpts.End = endTime + qOpts.Step = step + } + + // using a different metric name here so that we can check with a debugger which query (from the query string vs the HTTP request body) + // is being executed by the queriers + expr, err := parser.NewParser("up", parser.WithFunctions(parser.Functions)).ParseExpr() + require.NoError(t, err) + + planOpts := logicalplan.PlanOptions{ + DisableDuplicateLabelCheck: false, + } + + logicalPlan, err := logicalplan.NewFromAST(expr, &qOpts, planOpts) + require.NoError(t, err) + byteval, err := logicalplan.Marshal(logicalPlan.Root()) + require.NoError(t, err) + + return byteval +} diff --git a/pkg/api/queryapi/util.go b/pkg/api/queryapi/util.go index 9d85b8a96c..e9e43e8cb2 100644 --- a/pkg/api/queryapi/util.go +++ b/pkg/api/queryapi/util.go @@ -89,7 +89,7 @@ func returnAPIError(err error) *apiError { } type apiFuncResult struct { - data interface{} + data any err *apiError warnings
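Note on the test above: it pins down the wire contract for distributed execution. The Thanos logical plan travels as the "plan" form field of a POST, and an empty plan falls back to the "query" URL parameter. A client can produce a compatible payload with the same promql-engine calls the test helpers use; a minimal sketch, where the endpoint URL is illustrative and everything else mirrors createTestLogicalPlan:

package main

import (
	"fmt"
	"net/http"
	"net/url"
	"time"

	"github.com/prometheus/prometheus/promql/parser"
	"github.com/thanos-io/promql-engine/logicalplan"
	"github.com/thanos-io/promql-engine/query"
)

func main() {
	// Parse a PromQL expression and lower it to a logical plan.
	expr, err := parser.NewParser("up", parser.WithFunctions(parser.Functions)).ParseExpr()
	if err != nil {
		panic(err)
	}
	ts := time.Unix(1536673670, 0)
	opts := &query.Options{Start: ts, End: ts, StepsBatch: 10}
	plan, err := logicalplan.NewFromAST(expr, opts, logicalplan.PlanOptions{})
	if err != nil {
		panic(err)
	}
	body, err := logicalplan.Marshal(plan.Root())
	if err != nil {
		panic(err)
	}

	// Send the serialized plan as the "plan" form field; the query
	// parameter is left empty so the plan is what gets executed.
	form := url.Values{"plan": {string(body)}}
	resp, err := http.PostForm("http://localhost:9009/api/v1/query?query=&time=1536673670", form)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}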
annotations.Annotations finalizer func() diff --git a/pkg/chunk/cache/background.go b/pkg/chunk/cache/background.go index bfdfb748d8..1e74fe5012 100644 --- a/pkg/chunk/cache/background.go +++ b/pkg/chunk/cache/background.go @@ -83,10 +83,7 @@ const keysPerBatch = 100 // Store writes keys for the cache in the background. func (c *backgroundCache) Store(ctx context.Context, keys []string, bufs [][]byte) { for len(keys) > 0 { - num := keysPerBatch - if num > len(keys) { - num = len(keys) - } + num := min(keysPerBatch, len(keys)) bgWrite := backgroundWrite{ keys: keys[:num], diff --git a/pkg/chunk/cache/cache_test.go b/pkg/chunk/cache/cache_test.go index 5209b3e1b2..5ed7314caa 100644 --- a/pkg/chunk/cache/cache_test.go +++ b/pkg/chunk/cache/cache_test.go @@ -22,7 +22,7 @@ func fillCache(t *testing.T, cache cache.Cache) ([]string, []chunkenc.Chunk) { keys := []string{} bufs := [][]byte{} chunks := []chunkenc.Chunk{} - for i := 0; i < 111; i++ { + for i := range 111 { ts := model.TimeFromUnix(int64(i * chunkLen)) promChunk := chunkenc.NewXORChunk() appender, err := promChunk.Appender() @@ -41,7 +41,7 @@ func fillCache(t *testing.T, cache cache.Cache) ([]string, []chunkenc.Chunk) { } func testCacheSingle(t *testing.T, cache cache.Cache, keys []string, chunks []chunkenc.Chunk) { - for i := 0; i < 100; i++ { + for range 100 { index := rand.Intn(len(keys)) key := keys[index] @@ -73,7 +73,7 @@ func testCacheMultiple(t *testing.T, cache cache.Cache, keys []string, chunks [] } func testCacheMiss(t *testing.T, cache cache.Cache) { - for i := 0; i < 100; i++ { + for range 100 { key := strconv.Itoa(rand.Int()) // arbitrary key which should fail: no chunk key is a single integer found, bufs, missing := cache.Fetch(context.Background(), []string{key}) require.Empty(t, found) diff --git a/pkg/chunk/cache/fifo_cache_test.go b/pkg/chunk/cache/fifo_cache_test.go index 50aee975a3..3515d07735 100644 --- a/pkg/chunk/cache/fifo_cache_test.go +++ b/pkg/chunk/cache/fifo_cache_test.go @@ -44,7 +44,7 @@ func TestFifoCacheEviction(t *testing.T) { // Check put / get works keys := []string{} values := [][]byte{} - for i := 0; i < cnt; i++ { + for i := range cnt { key := fmt.Sprintf("%02d", i) value := make([]byte, len(key)) copy(value, key) @@ -65,7 +65,7 @@ func TestFifoCacheEviction(t *testing.T) { assert.Equal(t, testutil.ToFloat64(c.staleGets), float64(0)) assert.Equal(t, testutil.ToFloat64(c.memoryBytes), float64(cnt*sizeOf(itemTemplate))) - for i := 0; i < cnt; i++ { + for i := range cnt { key := fmt.Sprintf("%02d", i) value, ok := c.Get(ctx, key) require.True(t, ok) @@ -107,7 +107,7 @@ func TestFifoCacheEviction(t *testing.T) { assert.Equal(t, testutil.ToFloat64(c.staleGets), float64(0)) assert.Equal(t, testutil.ToFloat64(c.memoryBytes), float64(cnt*sizeOf(itemTemplate))) - for i := 0; i < cnt-evicted; i++ { + for i := range cnt - evicted { _, ok := c.Get(ctx, fmt.Sprintf("%02d", i)) require.False(t, ok) } @@ -145,7 +145,7 @@ func TestFifoCacheEviction(t *testing.T) { for i := cnt; i < cnt+evicted; i++ { value, ok := c.Get(ctx, fmt.Sprintf("%02d", i)) require.True(t, ok) - require.Equal(t, []byte(fmt.Sprintf("%02d", i*2)), value) + require.Equal(t, fmt.Appendf(nil, "%02d", i*2), value) } assert.Equal(t, testutil.ToFloat64(c.entriesAdded), float64(3)) diff --git a/pkg/chunk/cache/memcached_client.go b/pkg/chunk/cache/memcached_client.go index d1b167e26b..49c3edd4d7 100644 --- a/pkg/chunk/cache/memcached_client.go +++ b/pkg/chunk/cache/memcached_client.go @@ -180,7 +180,7 @@ func (c *memcachedClient) 
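The backgroundCache.Store rewrite above is behavior-preserving: Go 1.21's built-in min replaces the hand-rolled clamp. The same batching pattern in isolation, as a sketch:

// batches splits keys into chunks of at most size elements, exactly like
// the min-based loop in backgroundCache.Store.
func batches(keys []string, size int) [][]string {
	var out [][]string
	for len(keys) > 0 {
		n := min(size, len(keys))
		out = append(out, keys[:n])
		keys = keys[n:]
	}
	return out
}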
dialViaCircuitBreaker(_ context.Context, network, addr } c.Unlock() - conn, err := cb.Execute(func() (interface{}, error) { + conn, err := cb.Execute(func() (any, error) { return net.DialTimeout(network, address, c.cbTimeout) }) if err != nil { diff --git a/pkg/chunk/cache/memcached_client_selector_test.go b/pkg/chunk/cache/memcached_client_selector_test.go index 69305670b6..c4364bcb76 100644 --- a/pkg/chunk/cache/memcached_client_selector_test.go +++ b/pkg/chunk/cache/memcached_client_selector_test.go @@ -44,7 +44,7 @@ func TestMemcachedJumpHashSelector_PickSever(t *testing.T) { // to make sure different IPs were discovered during SetServers distribution := make(map[string]int) - for i := 0; i < 100; i++ { + for i := range 100 { key := fmt.Sprintf("key-%d", i) addr, err := s.PickServer(key) require.NoError(t, err) diff --git a/pkg/chunk/cache/memcached_test.go b/pkg/chunk/cache/memcached_test.go index f15c27333c..ebb2581e9e 100644 --- a/pkg/chunk/cache/memcached_test.go +++ b/pkg/chunk/cache/memcached_test.go @@ -43,19 +43,19 @@ func testMemcache(t *testing.T, memcache *cache.Memcached) { bufs := make([][]byte, 0, numKeys) // Insert 1000 keys skipping all multiples of 5. - for i := 0; i < numKeys; i++ { + for i := range numKeys { keysIncMissing = append(keysIncMissing, fmt.Sprint(i)) if i%5 == 0 { continue } keys = append(keys, fmt.Sprint(i)) - bufs = append(bufs, []byte(fmt.Sprint(i))) + bufs = append(bufs, fmt.Append(nil, i)) } memcache.Store(ctx, keys, bufs) found, bufs, missing := memcache.Fetch(ctx, keysIncMissing) - for i := 0; i < numKeys; i++ { + for i := range numKeys { if i%5 == 0 { require.Equal(t, fmt.Sprint(i), missing[0]) missing = missing[1:] @@ -118,17 +118,17 @@ func testMemcacheFailing(t *testing.T, memcache *cache.Memcached) { keys := make([]string, 0, numKeys) bufs := make([][]byte, 0, numKeys) // Insert 1000 keys skipping all multiples of 5. 
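The fmt.Append rewrites in these memcached tests are likewise equivalent to the old []byte(fmt.Sprint(...)) form, but format straight into a byte slice without an intermediate string (fmt.Append and fmt.Appendf exist since Go 1.19). For example:

buf := fmt.Append(nil, 42)         // []byte("42"), same bytes as []byte(fmt.Sprint(42))
buf = fmt.Appendf(buf, "-%02d", 7) // []byte("42-07")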
- for i := 0; i < numKeys; i++ { + for i := range numKeys { keysIncMissing = append(keysIncMissing, fmt.Sprint(i)) if i%5 == 0 { continue } keys = append(keys, fmt.Sprint(i)) - bufs = append(bufs, []byte(fmt.Sprint(i))) + bufs = append(bufs, fmt.Append(nil, i)) } memcache.Store(ctx, keys, bufs) - for i := 0; i < 10; i++ { + for range 10 { found, bufs, missing := memcache.Fetch(ctx, keysIncMissing) require.Equal(t, len(found), len(bufs)) @@ -182,9 +182,9 @@ func testMemcachedStopping(t *testing.T, memcache *cache.Memcached) { ctx := context.Background() keys := make([]string, 0, numKeys) bufs := make([][]byte, 0, numKeys) - for i := 0; i < numKeys; i++ { + for i := range numKeys { keys = append(keys, fmt.Sprint(i)) - bufs = append(bufs, []byte(fmt.Sprint(i))) + bufs = append(bufs, fmt.Append(nil, i)) } memcache.Store(ctx, keys, bufs) diff --git a/pkg/chunk/cache/redis_cache_test.go b/pkg/chunk/cache/redis_cache_test.go index d0f7c7ca35..154e688066 100644 --- a/pkg/chunk/cache/redis_cache_test.go +++ b/pkg/chunk/cache/redis_cache_test.go @@ -35,7 +35,7 @@ func TestRedisCache(t *testing.T) { require.Len(t, found, nHit) require.Len(t, missed, 0) - for i := 0; i < nHit; i++ { + for i := range nHit { require.Equal(t, keys[i], found[i]) require.Equal(t, bufs[i], data[i]) } @@ -45,7 +45,7 @@ func TestRedisCache(t *testing.T) { require.Len(t, found, 0) require.Len(t, missed, nMiss) - for i := 0; i < nMiss; i++ { + for i := range nMiss { require.Equal(t, miss[i], missed[i]) } } diff --git a/pkg/chunk/fixtures.go b/pkg/chunk/fixtures.go index 9227415db0..433cd8c277 100644 --- a/pkg/chunk/fixtures.go +++ b/pkg/chunk/fixtures.go @@ -8,22 +8,22 @@ import ( ) // BenchmarkLabels is a real example from Kubernetes' embedded cAdvisor metrics, lightly obfuscated -var BenchmarkLabels = labels.Labels{ - {Name: model.MetricNameLabel, Value: "container_cpu_usage_seconds_total"}, - {Name: "beta_kubernetes_io_arch", Value: "amd64"}, - {Name: "beta_kubernetes_io_instance_type", Value: "c3.somesize"}, - {Name: "beta_kubernetes_io_os", Value: "linux"}, - {Name: "container_name", Value: "some-name"}, - {Name: "cpu", Value: "cpu01"}, - {Name: "failure_domain_beta_kubernetes_io_region", Value: "somewhere-1"}, - {Name: "failure_domain_beta_kubernetes_io_zone", Value: "somewhere-1b"}, - {Name: "id", Value: "/kubepods/burstable/pod6e91c467-e4c5-11e7-ace3-0a97ed59c75e/a3c8498918bd6866349fed5a6f8c643b77c91836427fb6327913276ebc6bde28"}, - {Name: "image", Value: "registry/organisation/name@sha256:dca3d877a80008b45d71d7edc4fd2e44c0c8c8e7102ba5cbabec63a374d1d506"}, - {Name: "instance", Value: "ip-111-11-1-11.ec2.internal"}, - {Name: "job", Value: "kubernetes-cadvisor"}, - {Name: "kubernetes_io_hostname", Value: "ip-111-11-1-11"}, - {Name: "monitor", Value: "prod"}, - {Name: "name", Value: "k8s_some-name_some-other-name-5j8s8_kube-system_6e91c467-e4c5-11e7-ace3-0a97ed59c75e_0"}, - {Name: "namespace", Value: "kube-system"}, - {Name: "pod_name", Value: "some-other-name-5j8s8"}, -} +var BenchmarkLabels = labels.FromStrings( + model.MetricNameLabel, "container_cpu_usage_seconds_total", + "beta_kubernetes_io_arch", "amd64", + "beta_kubernetes_io_instance_type", "c3.somesize", + "beta_kubernetes_io_os", "linux", + "container_name", "some-name", + "cpu", "cpu01", + "failure_domain_beta_kubernetes_io_region", "somewhere-1", + "failure_domain_beta_kubernetes_io_zone", "somewhere-1b", + "id", "/kubepods/burstable/pod6e91c467-e4c5-11e7-ace3-0a97ed59c75e/a3c8498918bd6866349fed5a6f8c643b77c91836427fb6327913276ebc6bde28", + "image", 
"registry/organisation/name@sha256:dca3d877a80008b45d71d7edc4fd2e44c0c8c8e7102ba5cbabec63a374d1d506", + "instance", "ip-111-11-1-11.ec2.internal", + "job", "kubernetes-cadvisor", + "kubernetes_io_hostname", "ip-111-11-1-11", + "monitor", "prod", + "name", "k8s_some-name_some-other-name-5j8s8_kube-system_6e91c467-e4c5-11e7-ace3-0a97ed59c75e_0", + "namespace", "kube-system", + "pod_name", "some-other-name-5j8s8", +) diff --git a/pkg/chunk/json_helpers.go b/pkg/chunk/json_helpers.go index 9107f7d8c2..2171114938 100644 --- a/pkg/chunk/json_helpers.go +++ b/pkg/chunk/json_helpers.go @@ -1,7 +1,6 @@ package chunk import ( - "sort" "unsafe" jsoniter "github.com/json-iterator/go" @@ -19,35 +18,40 @@ func init() { // Override Prometheus' labels.Labels decoder which goes via a map func DecodeLabels(ptr unsafe.Pointer, iter *jsoniter.Iterator) { labelsPtr := (*labels.Labels)(ptr) - *labelsPtr = make(labels.Labels, 0, 10) + b := labels.NewBuilder(labels.EmptyLabels()) + iter.ReadMapCB(func(iter *jsoniter.Iterator, key string) bool { value := iter.ReadString() - *labelsPtr = append(*labelsPtr, labels.Label{Name: key, Value: value}) + b.Set(key, value) return true }) - // Labels are always sorted, but earlier Cortex using a map would - // output in any order so we have to sort on read in - sort.Sort(*labelsPtr) + *labelsPtr = b.Labels() } // Override Prometheus' labels.Labels encoder which goes via a map func EncodeLabels(ptr unsafe.Pointer, stream *jsoniter.Stream) { - labelsPtr := (*labels.Labels)(ptr) + lbls := *(*labels.Labels)(ptr) + stream.WriteObjectStart() - for i, v := range *labelsPtr { - if i != 0 { + first := true + + lbls.Range(func(l labels.Label) { + if !first { stream.WriteMore() } - stream.WriteString(v.Name) + first = false + + stream.WriteString(l.Name) stream.WriteRaw(`:`) - stream.WriteString(v.Value) - } + stream.WriteString(l.Value) + }) + stream.WriteObjectEnd() } func labelsIsEmpty(ptr unsafe.Pointer) bool { - labelsPtr := (*labels.Labels)(ptr) - return len(*labelsPtr) == 0 + labelsPtr := *(*labels.Labels)(ptr) + return labelsPtr.Len() == 0 } // Decode via jsoniter's float64 routine is faster than getting the string data and decoding as two integers diff --git a/pkg/compactor/blocks_cleaner.go b/pkg/compactor/blocks_cleaner.go index dd957d264c..8e1e9a6055 100644 --- a/pkg/compactor/blocks_cleaner.go +++ b/pkg/compactor/blocks_cleaner.go @@ -475,7 +475,7 @@ func (c *BlocksCleaner) deleteUserMarkedForDeletion(ctx context.Context, userLog } c.tenantBucketIndexLastUpdate.DeleteLabelValues(userID) - var blocksToDelete []interface{} + var blocksToDelete []any err := userBucket.Iter(ctx, "", func(name string) error { if err := ctx.Err(); err != nil { return err @@ -492,7 +492,7 @@ func (c *BlocksCleaner) deleteUserMarkedForDeletion(ctx context.Context, userLog } var deletedBlocks, failed atomic.Int64 - err = concurrency.ForEach(ctx, blocksToDelete, defaultDeleteBlocksConcurrency, func(ctx context.Context, job interface{}) error { + err = concurrency.ForEach(ctx, blocksToDelete, defaultDeleteBlocksConcurrency, func(ctx context.Context, job any) error { blockID := job.(ulid.ULID) err := block.Delete(ctx, userLogger, userBucket, blockID) if err != nil { @@ -697,7 +697,7 @@ func (c *BlocksCleaner) cleanUser(ctx context.Context, userLogger log.Logger, us // Delete blocks marked for deletion. We iterate over a copy of deletion marks because // we'll need to manipulate the index (removing blocks which get deleted). 
begin = time.Now() - blocksToDelete := make([]interface{}, 0, len(idx.BlockDeletionMarks)) + blocksToDelete := make([]any, 0, len(idx.BlockDeletionMarks)) var mux sync.Mutex for _, mark := range idx.BlockDeletionMarks.Clone() { if time.Since(mark.GetDeletionTime()).Seconds() <= c.cfg.DeletionDelay.Seconds() { @@ -709,7 +709,7 @@ func (c *BlocksCleaner) cleanUser(ctx context.Context, userLogger log.Logger, us // Concurrently deletes blocks marked for deletion, and removes blocks from index. begin = time.Now() - _ = concurrency.ForEach(ctx, blocksToDelete, defaultDeleteBlocksConcurrency, func(ctx context.Context, job interface{}) error { + _ = concurrency.ForEach(ctx, blocksToDelete, defaultDeleteBlocksConcurrency, func(ctx context.Context, job any) error { blockID := job.(ulid.ULID) if err := block.Delete(ctx, userLogger, userBucket, blockID); err != nil { @@ -884,7 +884,7 @@ func (c *BlocksCleaner) iterPartitionGroups(ctx context.Context, userBucket objs // and index are updated accordingly. func (c *BlocksCleaner) cleanUserPartialBlocks(ctx context.Context, userID string, partials map[ulid.ULID]error, idx *bucketindex.Index, userBucket objstore.InstrumentedBucket, userLogger log.Logger) { // Collect all blocks with missing meta.json into buffered channel. - blocks := make([]interface{}, 0, len(partials)) + blocks := make([]any, 0, len(partials)) for blockID, blockErr := range partials { // We can safely delete only blocks which are partial because the meta.json is missing. @@ -896,7 +896,7 @@ func (c *BlocksCleaner) cleanUserPartialBlocks(ctx context.Context, userID strin var mux sync.Mutex - _ = concurrency.ForEach(ctx, blocks, defaultDeleteBlocksConcurrency, func(ctx context.Context, job interface{}) error { + _ = concurrency.ForEach(ctx, blocks, defaultDeleteBlocksConcurrency, func(ctx context.Context, job any) error { blockID := job.(ulid.ULID) // We can safely delete only partial blocks with a deletion mark. err := metadata.ReadMarker(ctx, userLogger, userBucket, blockID.String(), &metadata.DeletionMark{}) diff --git a/pkg/compactor/blocks_cleaner_test.go b/pkg/compactor/blocks_cleaner_test.go index 787d377d63..9b13d7c1b9 100644 --- a/pkg/compactor/blocks_cleaner_test.go +++ b/pkg/compactor/blocks_cleaner_test.go @@ -50,7 +50,6 @@ func TestBlocksCleaner(t *testing.T) { {concurrency: 2}, {concurrency: 10}, } { - options := options t.Run(options.String(), func(t *testing.T) { t.Parallel() diff --git a/pkg/compactor/compactor.go b/pkg/compactor/compactor.go index e9ac396cf2..65fa95fea0 100644 --- a/pkg/compactor/compactor.go +++ b/pkg/compactor/compactor.go @@ -9,6 +9,7 @@ import ( "math/rand" "os" "path/filepath" + "slices" "strings" "time" @@ -353,10 +354,8 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { } func (cfg *Config) Validate(limits validation.Limits) error { - for _, blockRange := range cfg.BlockRanges { - if blockRange == 0 { - return errors.New("compactor block range period cannot be zero") - } + if slices.Contains(cfg.BlockRanges, 0) { + return errors.New("compactor block range period cannot be zero") } // Each block range period should be divisible by the previous one. 
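The divisibility rule mentioned in the comment above sits outside this hunk; a sketch of what that validation looks like end to end, together with the stdlib slices helper (Go 1.21) the config check now leans on (imports errors, fmt, slices, and time assumed; purely illustrative):

func validateBlockRanges(ranges []time.Duration) error {
	if slices.Contains(ranges, 0) {
		return errors.New("compactor block range period cannot be zero")
	}
	// Each block range period should be divisible by the previous one.
	for i := 1; i < len(ranges); i++ {
		if ranges[i]%ranges[i-1] != 0 {
			return fmt.Errorf("block range %v is not divisible by %v", ranges[i], ranges[i-1])
		}
	}
	return nil
}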
for i := 1; i < len(cfg.BlockRanges); i++ { @@ -366,7 +365,7 @@ func (cfg *Config) Validate(limits validation.Limits) error { } // Make sure a valid sharding strategy is being used - if !util.StringsContain(supportedShardingStrategies, cfg.ShardingStrategy) { + if !slices.Contains(supportedShardingStrategies, cfg.ShardingStrategy) { return errInvalidShardingStrategy } @@ -377,7 +376,7 @@ func (cfg *Config) Validate(limits validation.Limits) error { } // Make sure a valid compaction strategy is being used - if !util.StringsContain(supportedCompactionStrategies, cfg.CompactionStrategy) { + if !slices.Contains(supportedCompactionStrategies, cfg.CompactionStrategy) { return errInvalidCompactionStrategy } @@ -1280,12 +1279,7 @@ func (c *Compactor) isCausedByPermissionDenied(err error) bool { cause = errors.Unwrap(cause) } if multiErr, ok := cause.(errutil.NonNilMultiRootError); ok { - for _, err := range multiErr { - if c.isPermissionDeniedErr(err) { - return true - } - } - return false + return slices.ContainsFunc(multiErr, c.isPermissionDeniedErr) } return c.isPermissionDeniedErr(cause) } diff --git a/pkg/compactor/compactor_metrics_test.go b/pkg/compactor/compactor_metrics_test.go index 75879f2d96..0288bbe909 100644 --- a/pkg/compactor/compactor_metrics_test.go +++ b/pkg/compactor/compactor_metrics_test.go @@ -49,6 +49,7 @@ func TestCompactorMetrics(t *testing.T) { cortex_compactor_meta_synced{state="marked-for-deletion"} 0 cortex_compactor_meta_synced{state="marked-for-no-compact"} 0 cortex_compactor_meta_synced{state="no-meta-json"} 0 + cortex_compactor_meta_synced{state="parquet-migrated"} 0 cortex_compactor_meta_synced{state="time-excluded"} 0 cortex_compactor_meta_synced{state="too-fresh"} 0 # HELP cortex_compactor_meta_syncs_total Total blocks metadata synchronization attempts. diff --git a/pkg/compactor/compactor_paritioning_test.go b/pkg/compactor/compactor_paritioning_test.go index 1e5627590b..593e94d2ae 100644 --- a/pkg/compactor/compactor_paritioning_test.go +++ b/pkg/compactor/compactor_paritioning_test.go @@ -18,6 +18,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -214,7 +215,7 @@ func TestPartitionCompactor_SkipCompactionWhenCmkError(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -235,7 +236,7 @@ func TestPartitionCompactor_ShouldDoNothingOnNoUserBlocks(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -311,7 +312,7 @@ func TestPartitionCompactor_ShouldRetryCompactionOnFailureWhileDiscoveringUsersF require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until all retry attempts have completed. 
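The long run of func() interface{} to func() any rewrites that starts here and continues through the compactor tests below is purely mechanical: any has been a built-in alias for interface{} since Go 1.18, so every call site compiles identically. The real cortex_testutil.Poll is not part of this diff; a minimal sketch of such a helper with the new spelling, assuming equality-until-timeout semantics:

func poll(t *testing.T, timeout time.Duration, want any, have func() any) {
	t.Helper()
	deadline := time.Now().Add(timeout)
	for {
		got := have()
		if reflect.DeepEqual(want, got) {
			return
		}
		if time.Now().After(deadline) {
			t.Fatalf("poll timed out: want %v, got %v", want, got)
		}
		time.Sleep(10 * time.Millisecond)
	}
}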
- cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsFailed) }) @@ -410,7 +411,7 @@ func TestPartitionCompactor_ShouldIncrementCompactionErrorIfFailedToCompactASing require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until all retry attempts have completed. - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsFailed) }) @@ -478,7 +479,7 @@ func TestPartitionCompactor_ShouldCompactAndRemoveUserFolder(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -556,7 +557,7 @@ func TestPartitionCompactor_ShouldIterateOverUsersAndRunCompaction(t *testing.T) require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -692,7 +693,7 @@ func TestPartitionCompactor_ShouldNotCompactBlocksMarkedForDeletion(t *testing.T require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -833,7 +834,7 @@ func TestPartitionCompactor_ShouldNotCompactBlocksMarkedForSkipCompact(t *testin require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -901,7 +902,7 @@ func TestPartitionCompactor_ShouldNotCompactBlocksForUsersMarkedForDeletion(t *t require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -1012,7 +1013,7 @@ func TestPartitionCompactor_ShouldSkipOutOrOrderBlocks(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), c) //nolint:errcheck // Wait until a run has completed. 
- cortex_testutil.Poll(t, 20*time.Second, true, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, true, func() any { if _, err := os.Stat(path.Join(dir, "no-compact-mark.json")); err == nil { return true } @@ -1041,7 +1042,9 @@ func TestPartitionCompactor_ShouldCompactAllUsersOnShardingEnabledButOnlyOneInst bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-2"), false, nil) bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-2"), false, nil) bucketClient.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D", "user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", "user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/meta.json"}, nil) + //bucketClient.MockIterWithAttributes("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D", "user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", "user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/meta.json"}, nil) bucketClient.MockIter("user-2/", []string{"user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ", "user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/meta.json", "user-2/01FN3V83ABR9992RF8WRJZ76ZQ/meta.json"}, nil) + //bucketClient.MockIterWithAttributes("user-2/", []string{"user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ", "user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/meta.json", "user-2/01FN3V83ABR9992RF8WRJZ76ZQ/meta.json"}, nil) bucketClient.MockIter("user-1/markers/", nil, nil) bucketClient.MockGet("user-1/markers/cleaner-visit-marker.json", "", nil) bucketClient.MockUpload("user-1/markers/cleaner-visit-marker.json", nil) @@ -1106,7 +1109,7 @@ func TestPartitionCompactor_ShouldCompactAllUsersOnShardingEnabledButOnlyOneInst require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -1211,7 +1214,7 @@ func TestPartitionCompactor_ShouldCompactOnlyUsersOwnedByTheInstanceOnShardingEn // Wait until a run has been completed on each compactor for _, c := range compactors { - cortex_testutil.Poll(t, 60*time.Second, true, func() interface{} { + cortex_testutil.Poll(t, 60*time.Second, true, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) >= 1 }) } @@ -1362,7 +1365,7 @@ func TestPartitionCompactor_ShouldCompactOnlyShardsOwnedByTheInstanceOnShardingE // Wait until a run has been completed on each compactor for _, c := range compactors { - cortex_testutil.Poll(t, 60*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 60*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) } @@ -1507,7 +1510,7 @@ func mockBlockGroup(userID string, ids []string, bkt *bucket.ClientMock) *compac log.NewNopLogger(), bkt, getPartitionedGroupID(userID), - nil, + labels.EmptyLabels(), 0, true, true, @@ -1591,7 +1594,7 @@ func TestPartitionCompactor_DeleteLocalSyncFiles(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), c1)) // Wait until a run has been completed on first compactor. This happens as soon as compactor starts. - cortex_testutil.Poll(t, 20*time.Second, true, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, true, func() any { return prom_testutil.ToFloat64(c1.CompactionRunsCompleted) >= 1 }) @@ -1602,7 +1605,7 @@ func TestPartitionCompactor_DeleteLocalSyncFiles(t *testing.T) { // Now start second compactor, and wait until it runs compaction. 
require.NoError(t, services.StartAndAwaitRunning(context.Background(), c2)) - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c2.CompactionRunsCompleted) }) @@ -1709,7 +1712,7 @@ func TestPartitionCompactor_ShouldNotHangIfPlannerReturnNothing(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -1763,7 +1766,7 @@ func TestPartitionCompactor_ShouldNotFailCompactionIfAccessDeniedErrDuringMetaSy require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -1817,7 +1820,7 @@ func TestPartitionCompactor_ShouldNotFailCompactionIfAccessDeniedErrReturnedFrom require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) diff --git a/pkg/compactor/compactor_ring.go b/pkg/compactor/compactor_ring.go index c205ee80f5..430f042a7a 100644 --- a/pkg/compactor/compactor_ring.go +++ b/pkg/compactor/compactor_ring.go @@ -18,10 +18,11 @@ import ( // is used to strip down the config to the minimum, and avoid confusion // to the user. type RingConfig struct { - KVStore kv.Config `yaml:"kvstore"` - HeartbeatPeriod time.Duration `yaml:"heartbeat_period"` - HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"` - AutoForgetDelay time.Duration `yaml:"auto_forget_delay"` + KVStore kv.Config `yaml:"kvstore"` + HeartbeatPeriod time.Duration `yaml:"heartbeat_period"` + HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"` + AutoForgetDelay time.Duration `yaml:"auto_forget_delay"` + DetailedMetricsEnabled bool `yaml:"detailed_metrics_enabled"` // Wait ring stability. WaitStabilityMinDuration time.Duration `yaml:"wait_stability_min_duration"` @@ -55,6 +56,7 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { cfg.KVStore.RegisterFlagsWithPrefix("compactor.ring.", "collectors/", f) f.DurationVar(&cfg.HeartbeatPeriod, "compactor.ring.heartbeat-period", 5*time.Second, "Period at which to heartbeat to the ring. 0 = disabled.") f.DurationVar(&cfg.HeartbeatTimeout, "compactor.ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which compactors are considered unhealthy within the ring. 0 = never (timeout disabled).") + f.BoolVar(&cfg.DetailedMetricsEnabled, "compactor.ring.detailed-metrics-enabled", true, "Set to true to enable ring detailed metrics. These metrics provide detailed information, such as token count and ownership per tenant. Disabling them can significantly decrease the number of metrics emitted.") f.DurationVar(&cfg.AutoForgetDelay, "compactor.auto-forget-delay", 2*cfg.HeartbeatTimeout, "Time since last heartbeat before compactor will be removed from ring. 0 to disable") // Wait stability flags. 
@@ -89,6 +91,7 @@ func (cfg *RingConfig) ToLifecyclerConfig() ring.LifecyclerConfig { rc.KVStore = cfg.KVStore rc.HeartbeatTimeout = cfg.HeartbeatTimeout rc.ReplicationFactor = 1 + rc.DetailedMetricsEnabled = cfg.DetailedMetricsEnabled // Configure lifecycler lc.RingConfig = rc diff --git a/pkg/compactor/compactor_test.go b/pkg/compactor/compactor_test.go index a76afa4a20..5724c94699 100644 --- a/pkg/compactor/compactor_test.go +++ b/pkg/compactor/compactor_test.go @@ -193,7 +193,7 @@ func TestCompactor_SkipCompactionWhenCmkError(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -214,7 +214,7 @@ func TestCompactor_ShouldDoNothingOnNoUserBlocks(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -291,7 +291,7 @@ func TestCompactor_ShouldRetryCompactionOnFailureWhileDiscoveringUsersFromBucket require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until all retry attempts have completed. - cortex_testutil.Poll(t, time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsFailed) }) @@ -386,7 +386,7 @@ func TestCompactor_ShouldIncrementCompactionErrorIfFailedToCompactASingleTenant( require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until all retry attempts have completed. - cortex_testutil.Poll(t, time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsFailed) }) @@ -450,7 +450,7 @@ func TestCompactor_ShouldCompactAndRemoveUserFolder(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -519,7 +519,7 @@ func TestCompactor_ShouldIterateOverUsersAndRunCompaction(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -654,7 +654,7 @@ func TestCompactor_ShouldNotCompactBlocksMarkedForDeletion(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. 
- cortex_testutil.Poll(t, time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -787,7 +787,7 @@ func TestCompactor_ShouldNotCompactBlocksMarkedForSkipCompact(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) - cortex_testutil.Poll(t, time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -851,7 +851,7 @@ func TestCompactor_ShouldNotCompactBlocksForUsersMarkedForDeletion(t *testing.T) require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -961,7 +961,7 @@ func TestCompactor_ShouldSkipOutOrOrderBlocks(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), c) //nolint:errcheck // Wait until a run has completed. - cortex_testutil.Poll(t, 5*time.Second, true, func() interface{} { + cortex_testutil.Poll(t, 5*time.Second, true, func() any { if _, err := os.Stat(path.Join(dir, "no-compact-mark.json")); err == nil { return true } @@ -1047,7 +1047,7 @@ func TestCompactor_ShouldCompactAllUsersOnShardingEnabledButOnlyOneInstanceRunni require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, 5*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 5*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -1150,7 +1150,7 @@ func TestCompactor_ShouldCompactOnlyUsersOwnedByTheInstanceOnShardingEnabledAndM // Wait until a run has been completed on each compactor for _, c := range compactors { - cortex_testutil.Poll(t, 120*time.Second, true, func() interface{} { + cortex_testutil.Poll(t, 120*time.Second, true, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) >= 1 }) } @@ -1294,7 +1294,7 @@ func TestCompactor_ShouldCompactOnlyShardsOwnedByTheInstanceOnShardingEnabledWit // Wait until a run has been completed on each compactor for _, c := range compactors { - cortex_testutil.Poll(t, 60*time.Second, 2.0, func() interface{} { + cortex_testutil.Poll(t, 60*time.Second, 2.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) } @@ -1362,7 +1362,7 @@ func createTSDBBlock(t *testing.T, bkt objstore.Bucket, userID string, minT, max // Append a sample at the beginning and one at the end of the time range. for i, ts := range []int64{minT, maxT - 1} { - lbls := labels.Labels{labels.Label{Name: "series_id", Value: strconv.Itoa(i)}} + lbls := labels.FromStrings("series_id", strconv.Itoa(i)) app := db.Appender(context.Background()) _, err := app.Append(0, lbls, ts, float64(i)) @@ -1511,7 +1511,7 @@ func removeIgnoredLogs(input []string) []string { executionIDRe := regexp.MustCompile(`\s?execution_id=\S+`) main: - for i := 0; i < len(input); i++ { + for i := range input { log := input[i] // Remove any duration from logs. @@ -1821,7 +1821,7 @@ func TestCompactor_DeleteLocalSyncFiles(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), c1)) // Wait until a run has been completed on first compactor. This happens as soon as compactor starts. 
- cortex_testutil.Poll(t, 10*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 10*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c1.CompactionRunsCompleted) }) @@ -1832,7 +1832,7 @@ func TestCompactor_DeleteLocalSyncFiles(t *testing.T) { // Now start second compactor, and wait until it runs compaction. require.NoError(t, services.StartAndAwaitRunning(context.Background(), c2)) - cortex_testutil.Poll(t, 10*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 10*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c2.CompactionRunsCompleted) }) @@ -1918,7 +1918,7 @@ func TestCompactor_ShouldNotTreatInterruptionsAsErrors(t *testing.T) { }, nil) require.NoError(t, services.StartAndAwaitRunning(ctx, c)) - cortex_testutil.Poll(t, 1*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 1*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsInterrupted) }) @@ -1991,7 +1991,7 @@ func TestCompactor_ShouldNotFailCompactionIfAccessDeniedErrDuringMetaSync(t *tes require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -2042,7 +2042,7 @@ func TestCompactor_ShouldNotFailCompactionIfAccessDeniedErrReturnedFromBucket(t require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) // Wait until a run has completed. - cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) }) @@ -2088,7 +2088,7 @@ func TestCompactor_FailedWithRetriableError(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) - cortex_testutil.Poll(t, 1*time.Second, 2.0, func() interface{} { + cortex_testutil.Poll(t, 1*time.Second, 2.0, func() any { return prom_testutil.ToFloat64(c.compactorMetrics.compactionErrorsCount.WithLabelValues("user-1", retriableError)) }) @@ -2142,7 +2142,7 @@ func TestCompactor_FailedWithHaltError(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), c)) - cortex_testutil.Poll(t, 1*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 1*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(c.compactorMetrics.compactionErrorsCount.WithLabelValues("user-1", haltError)) }) @@ -2173,7 +2173,7 @@ func TestCompactor_RingLifecyclerShouldAutoForgetUnhealthyInstances(t *testing.T // Create two compactors var compactors []*Compactor - for i := 0; i < 2; i++ { + for i := range 2 { // Setup config cfg := prepareConfig() @@ -2209,11 +2209,11 @@ func TestCompactor_RingLifecyclerShouldAutoForgetUnhealthyInstances(t *testing.T require.NoError(t, services.StartAndAwaitRunning(context.Background(), compactor2)) // Wait until a run has completed. 
- cortex_testutil.Poll(t, 20*time.Second, 1.0, func() interface{} { + cortex_testutil.Poll(t, 20*time.Second, 1.0, func() any { return prom_testutil.ToFloat64(compactor2.CompactionRunsCompleted) }) - cortex_testutil.Poll(t, 5000*time.Millisecond, true, func() interface{} { + cortex_testutil.Poll(t, 5000*time.Millisecond, true, func() any { healthy, unhealthy, _ := compactor.ring.GetAllInstanceDescs(ring.Reporting) return len(healthy) == 2 && len(unhealthy) == 0 }) @@ -2222,7 +2222,7 @@ func TestCompactor_RingLifecyclerShouldAutoForgetUnhealthyInstances(t *testing.T // compactor service while UnregisterOnShutdown is false require.NoError(t, services.StopAndAwaitTerminated(context.Background(), compactor2)) - cortex_testutil.Poll(t, 5000*time.Millisecond, true, func() interface{} { + cortex_testutil.Poll(t, 5000*time.Millisecond, true, func() any { healthy, unhealthy, _ := compactor.ring.GetAllInstanceDescs(ring.Reporting) return len(healthy) == 1 && len(unhealthy) == 0 }) @@ -2282,7 +2282,7 @@ func TestCompactor_GetShardSizeForUser(t *testing.T) { // Create compactors var compactors []*Compactor - for i := 0; i < 5; i++ { + for i := range 5 { // Setup config cfg := prepareConfig() @@ -2317,7 +2317,7 @@ func TestCompactor_GetShardSizeForUser(t *testing.T) { // Wait until a run has been completed on each compactor for _, c := range compactors { - cortex_testutil.Poll(t, 120*time.Second, true, func() interface{} { + cortex_testutil.Poll(t, 120*time.Second, true, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) >= 1 }) } @@ -2366,7 +2366,7 @@ func TestCompactor_GetShardSizeForUser(t *testing.T) { // Wait until a run has been completed on each compactor for _, c := range compactors2 { - cortex_testutil.Poll(t, 120*time.Second, true, func() interface{} { + cortex_testutil.Poll(t, 120*time.Second, true, func() any { return prom_testutil.ToFloat64(c.CompactionRunsCompleted) >= 1 }) } diff --git a/pkg/compactor/partition_compaction_grouper.go b/pkg/compactor/partition_compaction_grouper.go index 711df2d0a3..2d308fb636 100644 --- a/pkg/compactor/partition_compaction_grouper.go +++ b/pkg/compactor/partition_compaction_grouper.go @@ -409,7 +409,7 @@ func (g *PartitionCompactionGrouper) partitionBlockGroup(group blocksGroupWithPa } partitions := make([]Partition, partitionCount) - for partitionID := 0; partitionID < partitionCount; partitionID++ { + for partitionID := range partitionCount { partitionedGroup := partitionedGroups[partitionID] blockIDs := make([]ulid.ULID, len(partitionedGroup.blocks)) for i, m := range partitionedGroup.blocks { @@ -468,10 +468,7 @@ func (g *PartitionCompactionGrouper) calculatePartitionCount(group blocksGroupWi if seriesCountLimit > 0 && totalSeriesCount > seriesCountLimit { partitionNumberBasedOnSeries = g.findNearestPartitionNumber(float64(totalSeriesCount), float64(seriesCountLimit)) } - partitionNumber := partitionNumberBasedOnIndex - if partitionNumberBasedOnSeries > partitionNumberBasedOnIndex { - partitionNumber = partitionNumberBasedOnSeries - } + partitionNumber := max(partitionNumberBasedOnSeries, partitionNumberBasedOnIndex) level.Info(g.logger).Log("msg", "calculated partition number for group", "partitioned_group_id", groupHash, "partition_number", partitionNumber, "total_index_size", totalIndexSizeInBytes, "index_size_limit", indexSizeLimit, "total_series_count", totalSeriesCount, "series_count_limit", seriesCountLimit, "group", group.String()) return partitionNumber } diff --git a/pkg/compactor/partition_compaction_grouper_test.go 
b/pkg/compactor/partition_compaction_grouper_test.go index 774ae23f11..6ca1ee8877 100644 --- a/pkg/compactor/partition_compaction_grouper_test.go +++ b/pkg/compactor/partition_compaction_grouper_test.go @@ -1,7 +1,6 @@ package compactor import ( - "context" "encoding/json" "fmt" "path" @@ -2080,12 +2079,8 @@ func TestPartitionCompactionGrouper_GenerateCompactionJobs(t *testing.T) { b.fixPartitionInfo(t, userID) } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - ingestionReplicationFactor := 1 - if testCase.ingestionReplicationFactor > 1 { - ingestionReplicationFactor = testCase.ingestionReplicationFactor - } + ctx := t.Context() + ingestionReplicationFactor := max(testCase.ingestionReplicationFactor, 1) g := NewPartitionCompactionGrouper( ctx, nil, diff --git a/pkg/compactor/sharded_compaction_lifecycle_callback_test.go b/pkg/compactor/sharded_compaction_lifecycle_callback_test.go index 0c0b8f0f34..9e598a2edc 100644 --- a/pkg/compactor/sharded_compaction_lifecycle_callback_test.go +++ b/pkg/compactor/sharded_compaction_lifecycle_callback_test.go @@ -9,6 +9,7 @@ import ( "github.com/go-kit/log" "github.com/oklog/ulid/v2" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb" "github.com/stretchr/testify/require" "github.com/thanos-io/thanos/pkg/block/metadata" @@ -46,7 +47,7 @@ func TestPreCompactionCallback(t *testing.T) { log.NewNopLogger(), nil, testGroupKey, - nil, + labels.EmptyLabels(), 0, true, true, diff --git a/pkg/compactor/sharded_posting.go b/pkg/compactor/sharded_posting.go index b0c29ca1c9..09115de684 100644 --- a/pkg/compactor/sharded_posting.go +++ b/pkg/compactor/sharded_posting.go @@ -28,10 +28,10 @@ func NewShardedPosting(ctx context.Context, postings index.Postings, partitionCo if builder.Labels().Hash()%partitionCount == partitionID { posting := postings.At() series = append(series, posting) - for _, label := range builder.Labels() { - symbols[label.Name] = struct{}{} - symbols[label.Value] = struct{}{} - } + builder.Labels().Range(func(l labels.Label) { + symbols[l.Name] = struct{}{} + symbols[l.Value] = struct{}{} + }) } } return index.NewListPostings(series), symbols, nil diff --git a/pkg/compactor/sharded_posting_test.go b/pkg/compactor/sharded_posting_test.go index e65b9b5291..50f8bd557c 100644 --- a/pkg/compactor/sharded_posting_test.go +++ b/pkg/compactor/sharded_posting_test.go @@ -44,17 +44,13 @@ func TestShardPostingAndSymbolBasedOnPartitionID(t *testing.T) { expectedSymbols[ConstLabelName] = false expectedSymbols[ConstLabelValue] = false expectedSeriesCount := 10 - for i := 0; i < expectedSeriesCount; i++ { + for range expectedSeriesCount { labelValue := strconv.Itoa(r.Int()) - series = append(series, labels.Labels{ - metricName, - {Name: ConstLabelName, Value: ConstLabelValue}, - {Name: TestLabelName, Value: labelValue}, - }) + series = append(series, labels.FromStrings(metricName.Name, metricName.Value, ConstLabelName, ConstLabelValue, TestLabelName, labelValue)) expectedSymbols[TestLabelName] = false expectedSymbols[labelValue] = false } - blockID, err := e2eutil.CreateBlock(context.Background(), tmpdir, series, 10, time.Now().Add(-10*time.Minute).UnixMilli(), time.Now().UnixMilli(), nil, 0, metadata.NoneFunc, nil) + blockID, err := e2eutil.CreateBlock(context.Background(), tmpdir, series, 10, time.Now().Add(-10*time.Minute).UnixMilli(), time.Now().UnixMilli(), labels.EmptyLabels(), 0, metadata.NoneFunc, nil) require.NoError(t, err) var closers []io.Closer @@ -64,7 +60,7 @@ func 
TestShardPostingAndSymbolBasedOnPartitionID(t *testing.T) { } }() seriesCount := 0 - for partitionID := 0; partitionID < partitionCount; partitionID++ { + for partitionID := range partitionCount { ir, err := index.NewFileReader(filepath.Join(tmpdir, blockID.String(), "index"), index.DecodePostingsRaw) closers = append(closers, ir) require.NoError(t, err) @@ -82,10 +78,10 @@ func TestShardPostingAndSymbolBasedOnPartitionID(t *testing.T) { require.NoError(t, err) require.Equal(t, uint64(partitionID), builder.Labels().Hash()%uint64(partitionCount)) seriesCount++ - for _, label := range builder.Labels() { - expectedShardedSymbols[label.Name] = struct{}{} - expectedShardedSymbols[label.Value] = struct{}{} - } + builder.Labels().Range(func(l labels.Label) { + expectedShardedSymbols[l.Name] = struct{}{} + expectedShardedSymbols[l.Value] = struct{}{} + }) } err = ir.Close() if err == nil { diff --git a/pkg/compactor/shuffle_sharding_grouper_test.go b/pkg/compactor/shuffle_sharding_grouper_test.go index 9cc0bb25d9..3ff9100375 100644 --- a/pkg/compactor/shuffle_sharding_grouper_test.go +++ b/pkg/compactor/shuffle_sharding_grouper_test.go @@ -2,7 +2,6 @@ package compactor import ( "bytes" - "context" "encoding/json" "path" "testing" @@ -380,8 +379,7 @@ func TestShuffleShardingGrouper_Groups(t *testing.T) { return testData.noCompactBlocks } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() g := NewShuffleShardingGrouper( ctx, nil, diff --git a/pkg/configs/api/api.go b/pkg/configs/api/api.go index 5ae41eade7..56cf15bc94 100644 --- a/pkg/configs/api/api.go +++ b/pkg/configs/api/api.go @@ -350,8 +350,8 @@ func parseConfigFormat(v string, defaultFormat string) string { if v == "" { return defaultFormat } - parts := strings.Split(v, ",") - for _, part := range parts { + parts := strings.SplitSeq(v, ",") + for part := range parts { mimeType, _, err := mime.ParseMediaType(part) if err != nil { continue diff --git a/pkg/configs/db/postgres/postgres.go b/pkg/configs/db/postgres/postgres.go index 7ebd464bf8..11d4ce233a 100644 --- a/pkg/configs/db/postgres/postgres.go +++ b/pkg/configs/db/postgres/postgres.go @@ -43,9 +43,9 @@ type DB struct { } type dbProxy interface { - Exec(query string, args ...interface{}) (sql.Result, error) - Query(query string, args ...interface{}) (*sql.Rows, error) - QueryRow(query string, args ...interface{}) *sql.Row + Exec(query string, args ...any) (sql.Result, error) + Query(query string, args ...any) (*sql.Rows, error) + QueryRow(query string, args ...any) *sql.Row Prepare(query string) (*sql.Stmt, error) } diff --git a/pkg/configs/db/traced.go b/pkg/configs/db/traced.go index 6f7bf7e014..962bfebefc 100644 --- a/pkg/configs/db/traced.go +++ b/pkg/configs/db/traced.go @@ -15,7 +15,7 @@ type traced struct { d DB } -func (t traced) trace(name string, args ...interface{}) { +func (t traced) trace(name string, args ...any) { level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("%s: %#v", name, args)) } diff --git a/pkg/configs/query_protection.go b/pkg/configs/query_protection.go index 2dd353580d..756a9f3620 100644 --- a/pkg/configs/query_protection.go +++ b/pkg/configs/query_protection.go @@ -14,7 +14,6 @@ type QueryProtection struct { } type rejection struct { - Enabled bool `yaml:"enabled"` Threshold threshold `yaml:"threshold"` } @@ -24,7 +23,6 @@ type threshold struct { } func (cfg *QueryProtection) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { - f.BoolVar(&cfg.Rejection.Enabled, prefix+"query-protection.rejection.enabled", 
false, "EXPERIMENTAL: Enable query rejection feature, where the component return 503 to all incoming query requests when the configured thresholds are breached.") f.Float64Var(&cfg.Rejection.Threshold.CPUUtilization, prefix+"query-protection.rejection.threshold.cpu-utilization", 0, "EXPERIMENTAL: Max CPU utilization that this ingester can reach before rejecting new query request (across all tenants) in percentage, between 0 and 1. monitored_resources config must include the resource type. 0 to disable.") f.Float64Var(&cfg.Rejection.Threshold.HeapUtilization, prefix+"query-protection.rejection.threshold.heap-utilization", 0, "EXPERIMENTAL: Max heap utilization that this ingester can reach before rejecting new query request (across all tenants) in percentage, between 0 and 1. monitored_resources config must include the resource type. 0 to disable.") } diff --git a/pkg/configs/userconfig/config.go b/pkg/configs/userconfig/config.go index 25e7d39b38..d218c9788e 100644 --- a/pkg/configs/userconfig/config.go +++ b/pkg/configs/userconfig/config.go @@ -53,7 +53,7 @@ func (v RuleFormatVersion) MarshalJSON() ([]byte, error) { } // MarshalYAML implements yaml.Marshaler. -func (v RuleFormatVersion) MarshalYAML() (interface{}, error) { +func (v RuleFormatVersion) MarshalYAML() (any, error) { switch v { case RuleFormatV1: return yaml.Marshal("1") @@ -82,7 +82,7 @@ func (v *RuleFormatVersion) UnmarshalJSON(data []byte) error { } // UnmarshalYAML implements yaml.Unmarshaler. -func (v *RuleFormatVersion) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (v *RuleFormatVersion) UnmarshalYAML(unmarshal func(any) error) error { var s string if err := unmarshal(&s); err != nil { return err @@ -129,7 +129,7 @@ func (c Config) MarshalJSON() ([]byte, error) { } // MarshalYAML implements yaml.Marshaler. -func (c Config) MarshalYAML() (interface{}, error) { +func (c Config) MarshalYAML() (any, error) { compat := &configCompat{ RulesFiles: c.RulesConfig.Files, RuleFormatVersion: c.RulesConfig.FormatVersion, @@ -158,7 +158,7 @@ func (c *Config) UnmarshalJSON(data []byte) error { } // UnmarshalYAML implements yaml.Unmarshaler. 
-func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *Config) UnmarshalYAML(unmarshal func(any) error) error { compat := configCompat{} if err := unmarshal(&compat); err != nil { return errors.WithStack(err) @@ -308,7 +308,7 @@ func (c RulesConfig) parseV2() (map[string][]rules.Rule, error) { time.Duration(rl.KeepFiringFor), labels.FromMap(rl.Labels), labels.FromMap(rl.Annotations), - nil, + labels.EmptyLabels(), "", true, util_log.GoKitLogToSlog(log.With(util_log.Logger, "alert", rl.Alert)), diff --git a/pkg/configs/userconfig/config_test.go b/pkg/configs/userconfig/config_test.go index 392ca911ca..d17dae574d 100644 --- a/pkg/configs/userconfig/config_test.go +++ b/pkg/configs/userconfig/config_test.go @@ -86,13 +86,9 @@ func TestParseLegacyAlerts(t *testing.T) { parsed, 5*time.Minute, 0, - labels.Labels{ - labels.Label{Name: "severity", Value: "critical"}, - }, - labels.Labels{ - labels.Label{Name: "message", Value: "I am a message"}, - }, - nil, + labels.FromStrings("severity", "critical"), + labels.FromStrings("message", "I am a message"), + labels.EmptyLabels(), "", true, util_log.GoKitLogToSlog(log.With(util_log.Logger, "alert", "TestAlert")), diff --git a/pkg/cortex/cortex.go b/pkg/cortex/cortex.go index 379501db0e..e7575abdce 100644 --- a/pkg/cortex/cortex.go +++ b/pkg/cortex/cortex.go @@ -8,13 +8,13 @@ import ( "net/http" "os" "reflect" + "slices" "strings" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/prometheus/promql" prom_storage "github.com/prometheus/prometheus/storage" "github.com/weaveworks/common/server" "github.com/weaveworks/common/signals" @@ -35,6 +35,7 @@ import ( "github.com/cortexproject/cortex/pkg/cortex/storage" "github.com/cortexproject/cortex/pkg/cortexpb" "github.com/cortexproject/cortex/pkg/distributor" + "github.com/cortexproject/cortex/pkg/engine" "github.com/cortexproject/cortex/pkg/flusher" "github.com/cortexproject/cortex/pkg/frontend" frontendv1 "github.com/cortexproject/cortex/pkg/frontend/v1" @@ -114,7 +115,7 @@ type Config struct { QueryRange queryrange.Config `yaml:"query_range"` BlocksStorage tsdb.BlocksStorageConfig `yaml:"blocks_storage"` Compactor compactor.Config `yaml:"compactor"` - ParquetConverter parquetconverter.Config `yaml:"parquet_converter" doc:"hidden"` + ParquetConverter parquetconverter.Config `yaml:"parquet_converter"` StoreGateway storegateway.Config `yaml:"store_gateway"` TenantFederation tenantfederation.Config `yaml:"tenant_federation"` @@ -250,7 +251,7 @@ func (c *Config) Validate(log log.Logger) error { } func (c *Config) isModuleEnabled(m string) bool { - return util.StringsContain(c.Target, m) + return slices.Contains(c.Target, m) } // validateYAMLEmptyNodes ensure that no empty node has been specified in the YAML config file. 
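Because any is an alias, the MarshalYAML/UnmarshalYAML signature changes above still satisfy yaml.v2's Marshaler and Unmarshaler interfaces with no behavior change. A minimal sketch of the same pattern for a hypothetical enum-like type:

type Format int

// MarshalYAML implements yaml.Marshaler using the any spelling.
func (f Format) MarshalYAML() (any, error) {
	return fmt.Sprintf("v%d", int(f)), nil
}

// UnmarshalYAML implements yaml.Unmarshaler.
func (f *Format) UnmarshalYAML(unmarshal func(any) error) error {
	var s string
	if err := unmarshal(&s); err != nil {
		return err
	}
	switch s {
	case "v1":
		*f = Format(1)
	case "v2":
		*f = Format(2)
	default:
		return fmt.Errorf("unknown format %q", s)
	}
	return nil
}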
@@ -322,7 +323,7 @@ type Cortex struct { QuerierQueryable prom_storage.SampleAndChunkQueryable ExemplarQueryable prom_storage.ExemplarQueryable MetadataQuerier querier.MetadataQuerier - QuerierEngine promql.QueryEngine + QuerierEngine engine.QueryEngine QueryFrontendTripperware tripperware.Tripperware ResourceMonitor *resource.Monitor @@ -393,10 +394,8 @@ func (t *Cortex) setupThanosTracing() { // setupGRPCHeaderForwarding appends a gRPC middleware used to enable the propagation of // HTTP Headers through child gRPC calls func (t *Cortex) setupGRPCHeaderForwarding() { - if len(t.Cfg.API.HTTPRequestHeadersToLog) > 0 { - t.Cfg.Server.GRPCMiddleware = append(t.Cfg.Server.GRPCMiddleware, grpcutil.HTTPHeaderPropagationServerInterceptor) - t.Cfg.Server.GRPCStreamMiddleware = append(t.Cfg.Server.GRPCStreamMiddleware, grpcutil.HTTPHeaderPropagationStreamServerInterceptor) - } + t.Cfg.Server.GRPCMiddleware = append(t.Cfg.Server.GRPCMiddleware, grpcutil.HTTPHeaderPropagationServerInterceptor) + t.Cfg.Server.GRPCStreamMiddleware = append(t.Cfg.Server.GRPCStreamMiddleware, grpcutil.HTTPHeaderPropagationStreamServerInterceptor) } func (t *Cortex) setupRequestSigning() { diff --git a/pkg/cortex/modules.go b/pkg/cortex/modules.go index e9a51f2c3c..013dbb9083 100644 --- a/pkg/cortex/modules.go +++ b/pkg/cortex/modules.go @@ -4,7 +4,6 @@ import ( "context" "flag" "fmt" - "log/slog" "net/http" "runtime" @@ -44,6 +43,7 @@ import ( "github.com/cortexproject/cortex/pkg/querier/tripperware/instantquery" "github.com/cortexproject/cortex/pkg/querier/tripperware/queryrange" querier_worker "github.com/cortexproject/cortex/pkg/querier/worker" + cortexquerysharding "github.com/cortexproject/cortex/pkg/querysharding" "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv/codec" "github.com/cortexproject/cortex/pkg/ring/kv/memberlist" @@ -364,6 +364,7 @@ func (t *Cortex) initQuerier() (serv services.Service, err error) { // to a Prometheus API struct instantiated with the Cortex Queryable. internalQuerierRouter := api.NewQuerierHandler( t.Cfg.API, + t.Cfg.Querier, t.QuerierQueryable, t.ExemplarQueryable, t.QuerierEngine, @@ -402,9 +403,7 @@ func (t *Cortex) initQuerier() (serv services.Service, err error) { // request context. internalQuerierRouter = t.API.AuthMiddleware.Wrap(internalQuerierRouter) - if len(t.Cfg.API.HTTPRequestHeadersToLog) > 0 { - internalQuerierRouter = t.API.HTTPHeaderMiddleware.Wrap(internalQuerierRouter) - } + internalQuerierRouter = t.API.HTTPHeaderMiddleware.Wrap(internalQuerierRouter) } // If neither frontend address or scheduler address is configured, no worker is needed. @@ -414,6 +413,9 @@ func (t *Cortex) initQuerier() (serv services.Service, err error) { t.Cfg.Worker.MaxConcurrentRequests = t.Cfg.Querier.MaxConcurrent t.Cfg.Worker.TargetHeaders = t.Cfg.API.HTTPRequestHeadersToLog + + t.Cfg.Worker.ListenPort = t.Cfg.Server.GRPCListenPort + return querier_worker.NewQuerierWorker(t.Cfg.Worker, httpgrpc_server.NewServer(internalQuerierRouter), util_log.Logger, prometheus.DefaultRegisterer) } @@ -511,7 +513,13 @@ func (t *Cortex) initFlusher() (serv services.Service, err error) { // initQueryFrontendTripperware instantiates the tripperware used by the query frontend // to optimize Prometheus query requests. 
func (t *Cortex) initQueryFrontendTripperware() (serv services.Service, err error) { - queryAnalyzer := querysharding.NewQueryAnalyzer() + var queryAnalyzer querysharding.Analyzer + queryAnalyzer = querysharding.NewQueryAnalyzer() + if t.Cfg.Querier.EnableParquetQueryable { + // Disable vertical sharding for binary expressions that use the 'ignoring' modifier when the parquet queryable is enabled. + queryAnalyzer = cortexquerysharding.NewDisableBinaryExpressionAnalyzer(queryAnalyzer) + } + // PrometheusCodec is a codec to encode and decode Prometheus query range requests and responses. prometheusCodec := queryrange.NewPrometheusCodec(false, t.Cfg.Querier.ResponseCompression, t.Cfg.API.QuerierDefaultCodec) // ShardedPrometheusCodec is same as PrometheusCodec but to be used on the sharded queries (it sum up the stats) @@ -534,7 +542,8 @@ func (t *Cortex) initQueryFrontendTripperware() (serv services.Service, err erro shardedPrometheusCodec, t.Cfg.Querier.LookbackDelta, t.Cfg.Querier.DefaultEvaluationInterval, - t.Cfg.Frontend.DistributedExecEnabled, + t.Cfg.Querier.DistributedExecEnabled, + t.Cfg.Querier.ThanosEngine.LogicalOptimizers, ) if err != nil { return nil, err @@ -547,7 +556,8 @@ func (t *Cortex) initQueryFrontendTripperware() (serv services.Service, err erro queryAnalyzer, t.Cfg.Querier.LookbackDelta, t.Cfg.Querier.DefaultEvaluationInterval, - t.Cfg.Frontend.DistributedExecEnabled) + t.Cfg.Querier.DistributedExecEnabled, + t.Cfg.Querier.ThanosEngine.LogicalOptimizers) if err != nil { return nil, err } @@ -785,6 +795,7 @@ func (t *Cortex) initMemberlistKV() (services.Service, error) { t.Cfg.Compactor.ShardingRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV t.Cfg.Ruler.Ring.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV t.Cfg.Alertmanager.ShardingRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV + t.Cfg.ParquetConverter.Ring.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV return t.MemberlistKV, nil } @@ -806,7 +817,7 @@ func (t *Cortex) initQueryScheduler() (services.Service, error) { tenant.WithDefaultResolver(tenantfederation.NewRegexValidator()) } - s, err := scheduler.NewScheduler(t.Cfg.QueryScheduler, t.Overrides, util_log.Logger, prometheus.DefaultRegisterer) + s, err := scheduler.NewScheduler(t.Cfg.QueryScheduler, t.Overrides, util_log.Logger, prometheus.DefaultRegisterer, t.Cfg.Querier.DistributedExecEnabled) if err != nil { return nil, errors.Wrap(err, "query-scheduler init") } diff --git a/pkg/cortex/runtime_config.go b/pkg/cortex/runtime_config.go index c2bcc786d9..5f71746c2a 100644 --- a/pkg/cortex/runtime_config.go +++ b/pkg/cortex/runtime_config.go @@ -64,7 +64,7 @@ type runtimeConfigLoader struct { cfg Config } -func (l runtimeConfigLoader) load(r io.Reader) (interface{}, error) { +func (l runtimeConfigLoader) load(r io.Reader) (any, error) { var overrides = &RuntimeConfigValues{} decoder := yaml.NewDecoder(r) @@ -145,7 +145,7 @@ func runtimeConfigHandler(runtimeCfgManager *runtimeconfig.Manager, defaultLimit return } - var output interface{} + var output any switch r.URL.Query().Get("mode") { case "diff": // Default runtime config is just empty struct, but to make diff work, diff --git a/pkg/cortex/tracing.go b/pkg/cortex/tracing.go index 1cdfa6a819..15839f95b9 100644 --- a/pkg/cortex/tracing.go +++ b/pkg/cortex/tracing.go @@ -11,14 +11,14 @@ import ( // ThanosTracerUnaryInterceptor injects the opentracing global tracer into the context // in order to get it picked up by Thanos components.
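The analyzer wiring in modules.go above is a decorator: the Cortex wrapper delegates to the inner Thanos analyzer and only overrides the verdict for the query shapes it wants to keep unsharded. A minimal sketch of that shape, with every name and signature hypothetical (the real wrapper is NewDisableBinaryExpressionAnalyzer in pkg/querysharding):

type vetoAnalyzer struct {
	inner Analyzer                // hypothetical: Analyze(query string) (Analysis, error)
	veto  func(query string) bool // returns true when sharding must be disabled
}

func (a vetoAnalyzer) Analyze(query string) (Analysis, error) {
	res, err := a.inner.Analyze(query)
	if err != nil {
		return res, err
	}
	if a.veto(query) {
		return notShardable(), nil // notShardable is hypothetical
	}
	return res, nil
}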
-func ThanosTracerUnaryInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { +func ThanosTracerUnaryInterceptor(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { ctx = objstoretracing.ContextWithTracer(ctx, opentracing.GlobalTracer()) return handler(tracing.ContextWithTracer(ctx, opentracing.GlobalTracer()), req) } // ThanosTracerStreamInterceptor injects the opentracing global tracer into the context // in order to get it picked up by Thanos components. -func ThanosTracerStreamInterceptor(srv interface{}, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { +func ThanosTracerStreamInterceptor(srv any, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { ctx := objstoretracing.ContextWithTracer(ss.Context(), opentracing.GlobalTracer()) return handler(srv, wrappedServerStream{ ctx: tracing.ContextWithTracer(ctx, opentracing.GlobalTracer()), diff --git a/pkg/cortexpb/compat.go b/pkg/cortexpb/compat.go index 6de2423d56..db96d5fac8 100644 --- a/pkg/cortexpb/compat.go +++ b/pkg/cortexpb/compat.go @@ -45,7 +45,7 @@ func ToWriteRequest(lbls []labels.Labels, samples []Sample, metadata []*MetricMe } func (w *WriteRequest) AddHistogramTimeSeries(lbls []labels.Labels, histograms []Histogram) { - for i := 0; i < len(lbls); i++ { + for i := range lbls { ts := TimeseriesFromPool() ts.Labels = append(ts.Labels, FromLabelsToLabelAdapters(lbls[i])...) ts.Histograms = append(ts.Histograms, histograms[i]) @@ -67,13 +67,13 @@ func FromLabelAdaptersToLabels(ls []LabelAdapter) labels.Labels { // Do NOT use unsafe to convert between data types because this function may // get in input labels whose data structure is reused. func FromLabelAdaptersToLabelsWithCopy(input []LabelAdapter) labels.Labels { - return CopyLabels(FromLabelAdaptersToLabels(input)) + return CopyLabels(input) } // Efficiently copies labels input slice. To be used in cases where input slice // can be reused, but long-term copy is needed. -func CopyLabels(input []labels.Label) labels.Labels { - result := make(labels.Labels, len(input)) +func CopyLabels(input []LabelAdapter) labels.Labels { + builder := labels.NewBuilder(labels.EmptyLabels()) size := 0 for _, l := range input { @@ -84,12 +84,14 @@ func CopyLabels(input []labels.Label) labels.Labels { // Copy all strings into the buffer, and use 'yoloString' to convert buffer // slices to strings. buf := make([]byte, size) + var name, value string - for i, l := range input { - result[i].Name, buf = copyStringToBuffer(l.Name, buf) - result[i].Value, buf = copyStringToBuffer(l.Value, buf) + for _, l := range input { + name, buf = copyStringToBuffer(l.Name, buf) + value, buf = copyStringToBuffer(l.Value, buf) + builder.Set(name, value) } - return result + return builder.Labels() } // Copies string to buffer (which must be big enough), and converts buffer slice containing @@ -211,7 +213,7 @@ func (s Sample) MarshalJSON() ([]byte, error) { if err != nil { return nil, err } - return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil + return fmt.Appendf(nil, "[%s,%s]", t, v), nil } // UnmarshalJSON implements json.Unmarshaler. 
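With its new signature, CopyLabels consumes the raw []LabelAdapter directly and returns a detached labels.Labels built through labels.Builder, so callers skip the intermediate conversion. A usage sketch (the pooled-buffer scenario is illustrative):

// adapters may alias a pooled request buffer that will be reused,
// so take a long-term copy before retaining the labels anywhere.
adapters := []cortexpb.LabelAdapter{{Name: "job", Value: "api"}}
retained := cortexpb.CopyLabels(adapters)
_ = retained // safe to keep after the request buffer returns to the pool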
diff --git a/pkg/cortexpb/compat_test.go b/pkg/cortexpb/compat_test.go index 6fda91a84e..336c8ce3fb 100644 --- a/pkg/cortexpb/compat_test.go +++ b/pkg/cortexpb/compat_test.go @@ -23,7 +23,7 @@ func TestStdlibJsonMarshalForSample(t *testing.T) { testMarshalling(t, json.Marshal, "json: error calling MarshalJSON for type cortexpb.Sample: test sample") } -func testMarshalling(t *testing.T, marshalFn func(v interface{}) ([]byte, error), expectedError string) { +func testMarshalling(t *testing.T, marshalFn func(v any) ([]byte, error), expectedError string) { isTesting = true defer func() { isTesting = false }() @@ -51,7 +51,7 @@ func TestStdlibJsonUnmarshalForSample(t *testing.T) { testUnmarshalling(t, json.Unmarshal, "test sample") } -func testUnmarshalling(t *testing.T, unmarshalFn func(data []byte, v interface{}) error, expectedError string) { +func testUnmarshalling(t *testing.T, unmarshalFn func(data []byte, v any) error, expectedError string) { isTesting = true defer func() { isTesting = false }() @@ -104,26 +104,28 @@ func TestMetricMetadataToMetricTypeToMetricType(t *testing.T) { func TestFromLabelAdaptersToLabels(t *testing.T) { input := []LabelAdapter{{Name: "hello", Value: "world"}} - expected := labels.Labels{labels.Label{Name: "hello", Value: "world"}} + expected := labels.FromStrings("hello", "world") actual := FromLabelAdaptersToLabels(input) assert.Equal(t, expected, actual) - // All strings must NOT be copied. - assert.Equal(t, uintptr(unsafe.Pointer(&input[0].Name)), uintptr(unsafe.Pointer(&actual[0].Name))) - assert.Equal(t, uintptr(unsafe.Pointer(&input[0].Value)), uintptr(unsafe.Pointer(&actual[0].Value))) + final := FromLabelsToLabelAdapters(actual) + // All strings must not be copied. + assert.Equal(t, uintptr(unsafe.Pointer(&input[0].Name)), uintptr(unsafe.Pointer(&final[0].Name))) + assert.Equal(t, uintptr(unsafe.Pointer(&input[0].Value)), uintptr(unsafe.Pointer(&final[0].Value))) } func TestFromLabelAdaptersToLabelsWithCopy(t *testing.T) { input := []LabelAdapter{{Name: "hello", Value: "world"}} - expected := labels.Labels{labels.Label{Name: "hello", Value: "world"}} + expected := labels.FromStrings("hello", "world") actual := FromLabelAdaptersToLabelsWithCopy(input) assert.Equal(t, expected, actual) + final := FromLabelsToLabelAdapters(actual) // All strings must be copied. 
- assert.NotEqual(t, uintptr(unsafe.Pointer(&input[0].Name)), uintptr(unsafe.Pointer(&actual[0].Name))) - assert.NotEqual(t, uintptr(unsafe.Pointer(&input[0].Value)), uintptr(unsafe.Pointer(&actual[0].Value))) + assert.NotEqual(t, uintptr(unsafe.Pointer(&input[0].Name)), uintptr(unsafe.Pointer(&final[0].Name))) + assert.NotEqual(t, uintptr(unsafe.Pointer(&input[0].Value)), uintptr(unsafe.Pointer(&final[0].Value))) } func BenchmarkFromLabelAdaptersToLabelsWithCopy(b *testing.B) { @@ -132,7 +134,7 @@ func BenchmarkFromLabelAdaptersToLabelsWithCopy(b *testing.B) { {Name: "some label", Value: "and its value"}, {Name: "long long long long long label name", Value: "perhaps even longer label value, but who's counting anyway?"}} - for i := 0; i < b.N; i++ { + for b.Loop() { FromLabelAdaptersToLabelsWithCopy(input) } } diff --git a/pkg/cortexpb/cortex.pb.go b/pkg/cortexpb/cortex.pb.go index 04eab395bc..e0dac736ba 100644 --- a/pkg/cortexpb/cortex.pb.go +++ b/pkg/cortexpb/cortex.pb.go @@ -263,6 +263,12 @@ func (m *StreamWriteRequest) GetRequest() *WriteRequest { type WriteResponse struct { Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // Samples represents X-Prometheus-Remote-Write-Written-Samples + Samples int64 `protobuf:"varint,3,opt,name=Samples,proto3" json:"Samples,omitempty"` + // Histograms represents X-Prometheus-Remote-Write-Written-Histograms + Histograms int64 `protobuf:"varint,4,opt,name=Histograms,proto3" json:"Histograms,omitempty"` + // Exemplars represents X-Prometheus-Remote-Write-Written-Exemplars + Exemplars int64 `protobuf:"varint,5,opt,name=Exemplars,proto3" json:"Exemplars,omitempty"` } func (m *WriteResponse) Reset() { *m = WriteResponse{} } @@ -311,6 +317,27 @@ func (m *WriteResponse) GetMessage() string { return "" } +func (m *WriteResponse) GetSamples() int64 { + if m != nil { + return m.Samples + } + return 0 +} + +func (m *WriteResponse) GetHistograms() int64 { + if m != nil { + return m.Histograms + } + return 0 +} + +func (m *WriteResponse) GetExemplars() int64 { + if m != nil { + return m.Exemplars + } + return 0 +} + type TimeSeries struct { Labels []LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"` // Sorted by time, oldest sample first. 
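The three counters added to WriteResponse carry back what the receiver actually wrote, which a remote-write 2.0 handler can surface as the X-Prometheus-Remote-Write-Written-* response headers named in the field comments. A hedged sketch of that mapping (the handler wiring is illustrative, not the actual distributor code):

import (
	"net/http"
	"strconv"

	"github.com/cortexproject/cortex/pkg/cortexpb"
)

func setWrittenStatsHeaders(h http.Header, resp *cortexpb.WriteResponse) {
	h.Set("X-Prometheus-Remote-Write-Written-Samples", strconv.FormatInt(resp.Samples, 10))
	h.Set("X-Prometheus-Remote-Write-Written-Histograms", strconv.FormatInt(resp.Histograms, 10))
	h.Set("X-Prometheus-Remote-Write-Written-Exemplars", strconv.FormatInt(resp.Exemplars, 10))
}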
@@ -945,80 +972,81 @@ func init() { func init() { proto.RegisterFile("cortex.proto", fileDescriptor_893a47d0a749d749) } var fileDescriptor_893a47d0a749d749 = []byte{ - // 1153 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xcd, 0x6f, 0x1b, 0xc5, - 0x1b, 0xde, 0xc9, 0xfa, 0xf3, 0xb5, 0xe3, 0x6e, 0xe7, 0x17, 0xf5, 0xb7, 0x04, 0x75, 0x9d, 0x2e, - 0x02, 0x2c, 0x84, 0x02, 0x0a, 0x02, 0xd4, 0xaa, 0x20, 0xd9, 0xad, 0xdb, 0x44, 0xad, 0x9d, 0x68, - 0xec, 0x50, 0x95, 0x8b, 0x35, 0xb5, 0xc7, 0xf6, 0xaa, 0xfb, 0xc5, 0xce, 0xb8, 0x6a, 0x38, 0x71, - 0x01, 0x71, 0xe4, 0xcc, 0x0d, 0x71, 0xe1, 0xca, 0x7f, 0xd1, 0x63, 0x8e, 0x55, 0x0f, 0x11, 0x75, - 0x2f, 0xe5, 0xd6, 0x03, 0x7f, 0x00, 0x9a, 0xd9, 0x2f, 0xa7, 0x69, 0xc5, 0x25, 0xb7, 0x79, 0x9f, - 0xf7, 0x79, 0xdf, 0x79, 0xe6, 0xfd, 0x58, 0x1b, 0xea, 0xe3, 0x20, 0x12, 0xec, 0xf1, 0x76, 0x18, - 0x05, 0x22, 0xc0, 0x95, 0xd8, 0x0a, 0x1f, 0x6c, 0x6e, 0xcc, 0x82, 0x59, 0xa0, 0xc0, 0x4f, 0xe4, - 0x29, 0xf6, 0xdb, 0xef, 0xc0, 0xc5, 0x1e, 0xe3, 0x9c, 0xce, 0xd8, 0x3d, 0x47, 0xcc, 0x3b, 0x8b, - 0x29, 0x61, 0xd3, 0x6b, 0x85, 0x57, 0xbf, 0x35, 0x35, 0xfb, 0x47, 0x1d, 0xea, 0xf7, 0x22, 0x47, - 0x30, 0xc2, 0xbe, 0x5b, 0x30, 0x2e, 0xf0, 0x01, 0x80, 0x70, 0x3c, 0xc6, 0x59, 0xe4, 0x30, 0x6e, - 0xa2, 0x2d, 0xbd, 0x55, 0xdb, 0xd9, 0xd8, 0x4e, 0x2f, 0xd8, 0x1e, 0x3a, 0x1e, 0x1b, 0x28, 0x5f, - 0x67, 0xf3, 0xc9, 0x49, 0x53, 0x7b, 0x76, 0xd2, 0xc4, 0x07, 0x11, 0xa3, 0xae, 0x1b, 0x8c, 0x87, - 0x59, 0x1c, 0x59, 0xc9, 0x81, 0xaf, 0x42, 0x69, 0x10, 0x2c, 0xa2, 0x31, 0x33, 0xd7, 0xb6, 0x50, - 0xab, 0xb1, 0x73, 0x25, 0xcf, 0xb6, 0x7a, 0xf3, 0x76, 0x4c, 0xea, 0xfa, 0x0b, 0x8f, 0x24, 0x01, - 0xf8, 0x1a, 0x54, 0x3c, 0x26, 0xe8, 0x84, 0x0a, 0x6a, 0xea, 0x4a, 0x8a, 0x99, 0x07, 0xf7, 0x98, - 0x88, 0x9c, 0x71, 0x2f, 0xf1, 0x77, 0x0a, 0x4f, 0x4e, 0x9a, 0x88, 0x64, 0x7c, 0x7c, 0x1d, 0x36, - 0xf9, 0x43, 0x27, 0x1c, 0xb9, 0xf4, 0x01, 0x73, 0x47, 0x3e, 0xf5, 0xd8, 0xe8, 0x11, 0x75, 0x9d, - 0x09, 0x15, 0x4e, 0xe0, 0x9b, 0x2f, 0xcb, 0x5b, 0xa8, 0x55, 0x21, 0xff, 0x97, 0x94, 0xbb, 0x92, - 0xd1, 0xa7, 0x1e, 0xfb, 0x26, 0xf3, 0xe3, 0x1e, 0xe8, 0x84, 0x4d, 0xcd, 0xbf, 0x25, 0xad, 0xb6, - 0xf3, 0xee, 0xea, 0xad, 0xaf, 0x15, 0xb2, 0x73, 0x59, 0xd6, 0xe1, 0xf8, 0xa4, 0x89, 0x9e, 0x9d, - 0x34, 0xcf, 0xd6, 0x99, 0xc8, 0x3c, 0x76, 0x13, 0x20, 0x7f, 0x1e, 0x2e, 0x83, 0xde, 0x3e, 0xd8, - 0x33, 0x34, 0x5c, 0x81, 0x02, 0x39, 0xbc, 0xdb, 0x35, 0x90, 0xfd, 0x27, 0x02, 0x3c, 0x10, 0x11, - 0xa3, 0xde, 0xa9, 0x6e, 0x6c, 0x42, 0x65, 0xc8, 0x7c, 0xea, 0x8b, 0xbd, 0x9b, 0x26, 0xda, 0x42, - 0xad, 0x2a, 0xc9, 0x6c, 0xfc, 0x29, 0x94, 0x13, 0x9a, 0x2a, 0x6c, 0x6d, 0xe7, 0xd2, 0x9b, 0x0b, - 0x4b, 0x52, 0x5a, 0xfa, 0xa8, 0x97, 0xe7, 0xf4, 0xa8, 0xaf, 0x60, 0x3d, 0xb9, 0x87, 0x87, 0x81, - 0xcf, 0x19, 0xc6, 0x50, 0x18, 0x07, 0x13, 0xa6, 0x94, 0x16, 0x89, 0x3a, 0x63, 0x13, 0xca, 0x5e, - 0x1c, 0xae, 0x54, 0x56, 0x49, 0x6a, 0xda, 0xff, 0x20, 0x80, 0x7c, 0x9c, 0x70, 0x1b, 0x4a, 0xaa, - 0x55, 0xe9, 0xd0, 0xfd, 0x2f, 0x97, 0xa7, 0x1a, 0x74, 0x40, 0x9d, 0xa8, 0xb3, 0x91, 0xcc, 0x5c, - 0x5d, 0x41, 0xed, 0x09, 0x0d, 0x05, 0x8b, 0x48, 0x12, 0x28, 0x2b, 0xc2, 0xa9, 0x17, 0xba, 0x8c, - 0x9b, 0x6b, 0x2a, 0x87, 0x91, 0xe7, 0x18, 0x28, 0x87, 0x9a, 0x12, 0x8d, 0xa4, 0x34, 0xfc, 0x05, - 0x54, 0xd9, 0x63, 0xe6, 0x85, 0x2e, 0x8d, 0x78, 0x32, 0x61, 0x38, 0x8f, 0xe9, 0x26, 0xae, 0x24, - 0x2a, 0xa7, 0xe2, 0xab, 0x00, 0x73, 0x87, 0x8b, 0x60, 0x16, 0x51, 0x8f, 0x9b, 0x85, 0xd7, 0x05, - 0xef, 0xa6, 0xbe, 0x24, 0x72, 0x85, 0x6c, 0x7f, 0x0e, 0xd5, 0xec, 0x3d, 0xb2, 0x62, 0x72, 0x32, - 0x55, 
0xc5, 0xea, 0x44, 0x9d, 0xf1, 0x06, 0x14, 0x1f, 0x51, 0x77, 0x11, 0xd7, 0xab, 0x4e, 0x62, - 0xc3, 0x6e, 0x43, 0x29, 0x7e, 0x42, 0xee, 0x97, 0x41, 0x28, 0xf1, 0xe3, 0x2b, 0x50, 0x57, 0x3b, - 0x27, 0xa8, 0x17, 0x8e, 0x3c, 0xae, 0x82, 0x75, 0x52, 0xcb, 0xb0, 0x1e, 0xb7, 0x7f, 0x5d, 0x83, - 0xc6, 0xe9, 0xa5, 0xc1, 0x5f, 0x42, 0x41, 0x1c, 0x85, 0x71, 0xaa, 0xc6, 0xce, 0x7b, 0x6f, 0x5b, - 0xae, 0xc4, 0x1c, 0x1e, 0x85, 0x8c, 0xa8, 0x00, 0xfc, 0x31, 0x60, 0x4f, 0x61, 0xa3, 0x29, 0xf5, - 0x1c, 0xf7, 0x48, 0x2d, 0x58, 0xd2, 0x61, 0x23, 0xf6, 0xdc, 0x52, 0x0e, 0xb9, 0x57, 0xf2, 0x99, - 0x73, 0xe6, 0x86, 0x66, 0x41, 0xf9, 0xd5, 0x59, 0x62, 0x0b, 0xdf, 0x11, 0x66, 0x31, 0xc6, 0xe4, - 0xd9, 0x3e, 0x02, 0xc8, 0x6f, 0xc2, 0x35, 0x28, 0x1f, 0xf6, 0xef, 0xf4, 0xf7, 0xef, 0xf5, 0x0d, - 0x4d, 0x1a, 0x37, 0xf6, 0x0f, 0xfb, 0xc3, 0x2e, 0x31, 0x10, 0xae, 0x42, 0xf1, 0x76, 0xfb, 0xf0, - 0x76, 0xd7, 0x58, 0xc3, 0xeb, 0x50, 0xdd, 0xdd, 0x1b, 0x0c, 0xf7, 0x6f, 0x93, 0x76, 0xcf, 0xd0, - 0x31, 0x86, 0x86, 0xf2, 0xe4, 0x58, 0x41, 0x86, 0x0e, 0x0e, 0x7b, 0xbd, 0x36, 0xb9, 0x6f, 0x14, - 0xe5, 0xca, 0xed, 0xf5, 0x6f, 0xed, 0x1b, 0x25, 0x5c, 0x87, 0xca, 0x60, 0xd8, 0x1e, 0x76, 0x07, - 0xdd, 0xa1, 0x51, 0xb6, 0xef, 0x40, 0x29, 0xbe, 0xfa, 0x1c, 0x06, 0xd1, 0xfe, 0x09, 0x41, 0x25, - 0x1d, 0x9e, 0xf3, 0x18, 0xec, 0x53, 0x23, 0xf1, 0xd6, 0x96, 0xeb, 0x67, 0x5b, 0x7e, 0x5c, 0x84, - 0x6a, 0x36, 0x8c, 0xf8, 0x32, 0x54, 0xc7, 0xc1, 0xc2, 0x17, 0x23, 0xc7, 0x17, 0xaa, 0xe5, 0x85, - 0x5d, 0x8d, 0x54, 0x14, 0xb4, 0xe7, 0x0b, 0x7c, 0x05, 0x6a, 0xb1, 0x7b, 0xea, 0x06, 0x34, 0xfe, - 0xa8, 0xa0, 0x5d, 0x8d, 0x80, 0x02, 0x6f, 0x49, 0x0c, 0x1b, 0xa0, 0xf3, 0x85, 0xa7, 0x6e, 0x42, - 0x44, 0x1e, 0xf1, 0x25, 0x28, 0xf1, 0xf1, 0x9c, 0x79, 0x54, 0x35, 0xf7, 0x22, 0x49, 0x2c, 0xfc, - 0x3e, 0x34, 0xbe, 0x67, 0x51, 0x30, 0x12, 0xf3, 0x88, 0xf1, 0x79, 0xe0, 0x4e, 0x54, 0xa3, 0x11, - 0x59, 0x97, 0xe8, 0x30, 0x05, 0xf1, 0x07, 0x09, 0x2d, 0xd7, 0x55, 0x52, 0xba, 0x10, 0xa9, 0x4b, - 0xfc, 0x46, 0xaa, 0xed, 0x23, 0x30, 0x56, 0x78, 0xb1, 0xc0, 0xb2, 0x12, 0x88, 0x48, 0x23, 0x63, - 0xc6, 0x22, 0xdb, 0xd0, 0xf0, 0xd9, 0x8c, 0x0a, 0xe7, 0x11, 0x1b, 0xf1, 0x90, 0xfa, 0xdc, 0xac, - 0xbc, 0xfe, 0x33, 0xd6, 0x59, 0x8c, 0x1f, 0x32, 0x31, 0x08, 0xa9, 0x9f, 0x6c, 0xe8, 0x7a, 0x1a, - 0x21, 0x31, 0x8e, 0x3f, 0x84, 0x0b, 0x59, 0x8a, 0x09, 0x73, 0x05, 0xe5, 0x66, 0x75, 0x4b, 0x6f, - 0x61, 0x92, 0x65, 0xbe, 0xa9, 0xd0, 0x53, 0x44, 0xa5, 0x8d, 0x9b, 0xb0, 0xa5, 0xb7, 0x50, 0x4e, - 0x54, 0xc2, 0xe4, 0xe7, 0xad, 0x11, 0x06, 0xdc, 0x59, 0x11, 0x55, 0xfb, 0x6f, 0x51, 0x69, 0x44, - 0x26, 0x2a, 0x4b, 0x91, 0x88, 0xaa, 0xc7, 0xa2, 0x52, 0x38, 0x17, 0x95, 0x11, 0x13, 0x51, 0xeb, - 0xb1, 0xa8, 0x14, 0x4e, 0x44, 0x5d, 0x07, 0x88, 0x18, 0x67, 0x62, 0x34, 0x97, 0x95, 0x6f, 0xa8, - 0x8f, 0xc0, 0xe5, 0x37, 0x7c, 0xc6, 0xb6, 0x89, 0x64, 0xed, 0x3a, 0xbe, 0x20, 0xd5, 0x28, 0x3d, - 0x9e, 0x99, 0xbf, 0x0b, 0x67, 0xe7, 0xef, 0x1a, 0x54, 0xb3, 0xd0, 0xd3, 0xfb, 0x5c, 0x06, 0xfd, - 0x7e, 0x77, 0x60, 0x20, 0x5c, 0x82, 0xb5, 0xfe, 0xbe, 0xb1, 0x96, 0xef, 0xb4, 0xbe, 0x59, 0xf8, - 0xf9, 0x77, 0x0b, 0x75, 0xca, 0x50, 0x54, 0xe2, 0x3b, 0x75, 0x80, 0xbc, 0xf7, 0xf6, 0x75, 0x80, - 0xbc, 0x50, 0x72, 0xfc, 0x82, 0xe9, 0x94, 0xb3, 0x78, 0x9e, 0x2f, 0x92, 0xc4, 0x92, 0xb8, 0xcb, - 0xfc, 0x99, 0x98, 0xab, 0x31, 0x5e, 0x27, 0x89, 0xd5, 0xf9, 0xfa, 0xf8, 0xb9, 0xa5, 0x3d, 0x7d, - 0x6e, 0x69, 0xaf, 0x9e, 0x5b, 0xe8, 0x87, 0xa5, 0x85, 0xfe, 0x58, 0x5a, 0xe8, 0xc9, 0xd2, 0x42, - 0xc7, 0x4b, 0x0b, 0xfd, 0xb5, 0xb4, 0xd0, 0xcb, 0xa5, 0xa5, 0xbd, 0x5a, 0x5a, 0xe8, 0x97, 0x17, - 0x96, 0x76, 0xfc, 0xc2, 0xd2, 
0x9e, 0xbe, 0xb0, 0xb4, 0x6f, 0xb3, 0x3f, 0x58, 0x0f, 0x4a, 0xea, - 0x1f, 0xd5, 0x67, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x82, 0x66, 0x44, 0xf2, 0x81, 0x09, 0x00, - 0x00, + // 1183 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xbd, 0x8f, 0x1b, 0x45, + 0x14, 0xdf, 0xb9, 0xf5, 0xd7, 0x3e, 0xfb, 0x9c, 0xcd, 0x70, 0x0a, 0xcb, 0x41, 0xd6, 0x8e, 0x11, + 0x60, 0x21, 0x74, 0xa0, 0x43, 0x80, 0x12, 0x45, 0x48, 0x76, 0xe2, 0xe4, 0x4e, 0x89, 0x7d, 0xa7, + 0xb1, 0x8f, 0x28, 0x34, 0xd6, 0xc4, 0x37, 0xb6, 0x57, 0xd9, 0x2f, 0x76, 0xc6, 0x51, 0x8e, 0x8a, + 0x06, 0x44, 0x49, 0x43, 0x43, 0x87, 0x68, 0x68, 0xf9, 0x2f, 0x52, 0x5e, 0x19, 0xa5, 0x38, 0x11, + 0xa7, 0x09, 0x5d, 0x0a, 0xfe, 0x00, 0x34, 0xb3, 0x5f, 0xbe, 0x5c, 0x22, 0x9a, 0x74, 0xf3, 0x7e, + 0xef, 0x63, 0x7e, 0xf3, 0xde, 0xef, 0xad, 0x0d, 0xb5, 0x49, 0x10, 0x09, 0xf6, 0x70, 0x2b, 0x8c, + 0x02, 0x11, 0xe0, 0x4a, 0x6c, 0x85, 0xf7, 0x36, 0x37, 0x66, 0xc1, 0x2c, 0x50, 0xe0, 0xa7, 0xf2, + 0x14, 0xfb, 0x5b, 0xef, 0xc0, 0xf9, 0x3e, 0xe3, 0x9c, 0xce, 0xd8, 0x1d, 0x47, 0xcc, 0xbb, 0x8b, + 0x29, 0x61, 0xd3, 0x2b, 0x85, 0x17, 0xbf, 0x37, 0xb4, 0xd6, 0x8f, 0x3a, 0xd4, 0xee, 0x44, 0x8e, + 0x60, 0x84, 0x7d, 0xb7, 0x60, 0x5c, 0xe0, 0x7d, 0x00, 0xe1, 0x78, 0x8c, 0xb3, 0xc8, 0x61, 0xdc, + 0x42, 0x4d, 0xbd, 0x5d, 0xdd, 0xde, 0xd8, 0x4a, 0x2f, 0xd8, 0x1a, 0x39, 0x1e, 0x1b, 0x2a, 0x5f, + 0x77, 0xf3, 0xd1, 0x49, 0x43, 0x7b, 0x72, 0xd2, 0xc0, 0xfb, 0x11, 0xa3, 0xae, 0x1b, 0x4c, 0x46, + 0x59, 0x1e, 0x59, 0xa9, 0x81, 0x2f, 0x43, 0x69, 0x18, 0x2c, 0xa2, 0x09, 0xb3, 0xd6, 0x9a, 0xa8, + 0x5d, 0xdf, 0xbe, 0x94, 0x57, 0x5b, 0xbd, 0x79, 0x2b, 0x0e, 0xea, 0xf9, 0x0b, 0x8f, 0x24, 0x09, + 0xf8, 0x0a, 0x54, 0x3c, 0x26, 0xe8, 0x21, 0x15, 0xd4, 0xd2, 0x15, 0x15, 0x2b, 0x4f, 0xee, 0x33, + 0x11, 0x39, 0x93, 0x7e, 0xe2, 0xef, 0x16, 0x1e, 0x9d, 0x34, 0x10, 0xc9, 0xe2, 0xf1, 0x55, 0xd8, + 0xe4, 0xf7, 0x9d, 0x70, 0xec, 0xd2, 0x7b, 0xcc, 0x1d, 0xfb, 0xd4, 0x63, 0xe3, 0x07, 0xd4, 0x75, + 0x0e, 0xa9, 0x70, 0x02, 0xdf, 0x7a, 0x5e, 0x6e, 0xa2, 0x76, 0x85, 0xbc, 0x2d, 0x43, 0x6e, 0xcb, + 0x88, 0x01, 0xf5, 0xd8, 0x37, 0x99, 0x1f, 0xf7, 0x41, 0x27, 0x6c, 0x6a, 0xfd, 0x23, 0xc3, 0xaa, + 0xdb, 0xef, 0xae, 0xde, 0xfa, 0x52, 0x23, 0xbb, 0x17, 0x65, 0x1f, 0x8e, 0x4f, 0x1a, 0xe8, 0xc9, + 0x49, 0xe3, 0x6c, 0x9f, 0x89, 0xac, 0xd3, 0x6a, 0x00, 0xe4, 0xcf, 0xc3, 0x65, 0xd0, 0x3b, 0xfb, + 0xbb, 0xa6, 0x86, 0x2b, 0x50, 0x20, 0x07, 0xb7, 0x7b, 0x26, 0x6a, 0xfd, 0x85, 0x00, 0x0f, 0x45, + 0xc4, 0xa8, 0x77, 0x6a, 0x1a, 0x9b, 0x50, 0x19, 0x31, 0x9f, 0xfa, 0x62, 0xf7, 0xba, 0x85, 0x9a, + 0xa8, 0x6d, 0x90, 0xcc, 0xc6, 0x9f, 0x41, 0x39, 0x09, 0x53, 0x8d, 0xad, 0x6e, 0x5f, 0x78, 0x75, + 0x63, 0x49, 0x1a, 0x96, 0x3e, 0xea, 0xf9, 0x1b, 0x7a, 0xd4, 0xaf, 0x08, 0xd6, 0x93, 0x8b, 0x78, + 0x18, 0xf8, 0x9c, 0x61, 0x0c, 0x85, 0x49, 0x70, 0xc8, 0x14, 0xd5, 0x22, 0x51, 0x67, 0x6c, 0x41, + 0xd9, 0x8b, 0xf3, 0x15, 0x4d, 0x83, 0xa4, 0xa6, 0xf4, 0x0c, 0xa9, 0x17, 0xba, 0x8c, 0x5b, 0x7a, + 0x13, 0xb5, 0x75, 0x92, 0x9a, 0xd8, 0x06, 0xd8, 0x71, 0xb8, 0x08, 0x66, 0x11, 0xf5, 0xb8, 0x55, + 0x50, 0xce, 0x15, 0x04, 0xbf, 0x07, 0x46, 0xef, 0x21, 0xf3, 0x42, 0x97, 0x46, 0xdc, 0x2a, 0x2a, + 0x77, 0x0e, 0xb4, 0xfe, 0x45, 0x00, 0xb9, 0x4e, 0x71, 0x07, 0x4a, 0x4a, 0x03, 0xa9, 0x9a, 0xdf, + 0xca, 0xdf, 0xad, 0x26, 0xbf, 0x4f, 0x9d, 0xa8, 0xbb, 0x91, 0x88, 0xb9, 0xa6, 0xa0, 0xce, 0x21, + 0x0d, 0x05, 0x8b, 0x48, 0x92, 0x28, 0x5b, 0xcd, 0x13, 0xa6, 0x6b, 0xaa, 0x86, 0x99, 0xd7, 0x88, + 0x39, 0x2b, 0xf9, 0x69, 0x24, 0x0d, 0xc3, 0x5f, 0x82, 0xc1, 0x32, 0x86, 0xb1, 0x74, 0x71, 0x9e, + 
0x93, 0x72, 0x4d, 0xb2, 0xf2, 0x50, 0x7c, 0x19, 0x60, 0xbe, 0xfa, 0xf2, 0x97, 0x08, 0x67, 0x3d, + 0x48, 0x32, 0x57, 0x82, 0x5b, 0x5f, 0x80, 0x91, 0xbd, 0x47, 0x4e, 0x42, 0x4a, 0x5e, 0x4d, 0xa2, + 0x46, 0xd4, 0x19, 0x6f, 0x40, 0xf1, 0x01, 0x75, 0x17, 0xf1, 0x1c, 0x6a, 0x24, 0x36, 0x5a, 0x1d, + 0x28, 0xc5, 0x4f, 0xc8, 0xfd, 0x32, 0x09, 0x25, 0x7e, 0x7c, 0x09, 0x6a, 0x6a, 0x99, 0x05, 0xf5, + 0xc2, 0xb1, 0xc7, 0x55, 0xb2, 0x4e, 0xaa, 0x19, 0xd6, 0xe7, 0xad, 0xdf, 0xd6, 0xa0, 0x7e, 0x7a, + 0x1b, 0xf1, 0x57, 0x50, 0x10, 0x47, 0x61, 0x5c, 0xaa, 0xbe, 0xfd, 0xfe, 0xeb, 0xb6, 0x36, 0x31, + 0x47, 0x47, 0x21, 0x23, 0x2a, 0x01, 0x7f, 0x02, 0xd8, 0x53, 0xd8, 0x78, 0x4a, 0x3d, 0xc7, 0x3d, + 0x52, 0x9b, 0x9b, 0x28, 0xc7, 0x8c, 0x3d, 0x37, 0x94, 0x43, 0x2e, 0xac, 0x7c, 0xe6, 0x9c, 0xb9, + 0xa1, 0x92, 0x88, 0x41, 0xd4, 0x59, 0x62, 0x0b, 0xdf, 0x11, 0x4a, 0x17, 0x06, 0x51, 0xe7, 0xd6, + 0x11, 0x40, 0x7e, 0x13, 0xae, 0x42, 0xf9, 0x60, 0x70, 0x6b, 0xb0, 0x77, 0x67, 0x60, 0x6a, 0xd2, + 0xb8, 0xb6, 0x77, 0x30, 0x18, 0xf5, 0x88, 0x89, 0xb0, 0x01, 0xc5, 0x9b, 0x9d, 0x83, 0x9b, 0x3d, + 0x73, 0x0d, 0xaf, 0x83, 0xb1, 0xb3, 0x3b, 0x1c, 0xed, 0xdd, 0x24, 0x9d, 0xbe, 0xa9, 0x63, 0x0c, + 0x75, 0xe5, 0xc9, 0xb1, 0x82, 0x4c, 0x1d, 0x1e, 0xf4, 0xfb, 0x1d, 0x72, 0xd7, 0x2c, 0xca, 0x5d, + 0xde, 0x1d, 0xdc, 0xd8, 0x33, 0x4b, 0xb8, 0x06, 0x95, 0xe1, 0xa8, 0x33, 0xea, 0x0d, 0x7b, 0x23, + 0xb3, 0xdc, 0xba, 0x05, 0xa5, 0xf8, 0xea, 0x37, 0x20, 0xc4, 0xd6, 0x4f, 0x08, 0x2a, 0xa9, 0x78, + 0xde, 0x84, 0xb0, 0x4f, 0x49, 0xe2, 0xb5, 0x23, 0xd7, 0xcf, 0x8e, 0xfc, 0xb8, 0x08, 0x46, 0x26, + 0x46, 0x7c, 0x11, 0x8c, 0x49, 0xb0, 0xf0, 0xc5, 0xd8, 0xf1, 0x85, 0x1a, 0x79, 0x61, 0x47, 0x23, + 0x15, 0x05, 0xed, 0xfa, 0x02, 0x5f, 0x82, 0x6a, 0xec, 0x9e, 0xba, 0x01, 0x8d, 0xbf, 0x56, 0x68, + 0x47, 0x23, 0xa0, 0xc0, 0x1b, 0x12, 0xc3, 0x26, 0xe8, 0x7c, 0xe1, 0xa9, 0x9b, 0x10, 0x91, 0x47, + 0x7c, 0x01, 0x4a, 0x7c, 0x32, 0x67, 0x1e, 0x55, 0xc3, 0x3d, 0x4f, 0x12, 0x0b, 0x7f, 0x00, 0xf5, + 0xef, 0x59, 0x14, 0x8c, 0xc5, 0x3c, 0x62, 0x7c, 0x1e, 0xb8, 0x87, 0x6a, 0xd0, 0x88, 0xac, 0x4b, + 0x74, 0x94, 0x82, 0xf8, 0xc3, 0x24, 0x2c, 0xe7, 0x55, 0x52, 0xbc, 0x10, 0xa9, 0x49, 0xfc, 0x5a, + 0xca, 0xed, 0x63, 0x30, 0x57, 0xe2, 0x62, 0x82, 0x65, 0x45, 0x10, 0x91, 0x7a, 0x16, 0x19, 0x93, + 0xec, 0x40, 0xdd, 0x67, 0x33, 0x2a, 0x9c, 0x07, 0x6c, 0xcc, 0x43, 0xea, 0x73, 0xab, 0xf2, 0xf2, + 0xef, 0x63, 0x77, 0x31, 0xb9, 0xcf, 0xc4, 0x30, 0xa4, 0x7e, 0xb2, 0xa1, 0xeb, 0x69, 0x86, 0xc4, + 0x38, 0xfe, 0x08, 0xce, 0x65, 0x25, 0x0e, 0x99, 0x2b, 0x28, 0xb7, 0x8c, 0xa6, 0xde, 0xc6, 0x24, + 0xab, 0x7c, 0x5d, 0xa1, 0xa7, 0x02, 0x15, 0x37, 0x6e, 0x41, 0x53, 0x6f, 0xa3, 0x3c, 0x50, 0x11, + 0x93, 0x9f, 0xb7, 0x7a, 0x18, 0x70, 0x67, 0x85, 0x54, 0xf5, 0xff, 0x49, 0xa5, 0x19, 0x19, 0xa9, + 0xac, 0x44, 0x42, 0xaa, 0x16, 0x93, 0x4a, 0xe1, 0x9c, 0x54, 0x16, 0x98, 0x90, 0x5a, 0x8f, 0x49, + 0xa5, 0x70, 0x42, 0xea, 0x2a, 0x40, 0xc4, 0x38, 0x13, 0xe3, 0xb9, 0xec, 0x7c, 0x5d, 0x7d, 0x04, + 0x2e, 0xbe, 0xe2, 0x33, 0xb6, 0x45, 0x64, 0xd4, 0x8e, 0xe3, 0x0b, 0x62, 0x44, 0xe9, 0xf1, 0x8c, + 0xfe, 0xce, 0x9d, 0xd5, 0xdf, 0x15, 0x30, 0xb2, 0xd4, 0xd3, 0xfb, 0x5c, 0x06, 0xfd, 0x6e, 0x6f, + 0x68, 0x22, 0x5c, 0x82, 0xb5, 0xc1, 0x9e, 0xb9, 0x96, 0xef, 0xb4, 0xbe, 0x59, 0xf8, 0xf9, 0x0f, + 0x1b, 0x75, 0xcb, 0x50, 0x54, 0xe4, 0xbb, 0x35, 0x80, 0x7c, 0xf6, 0xad, 0xab, 0x00, 0x79, 0xa3, + 0xa4, 0xfc, 0x82, 0xe9, 0x94, 0xb3, 0x58, 0xcf, 0xe7, 0x49, 0x62, 0x49, 0xdc, 0x65, 0xfe, 0x4c, + 0xcc, 0x95, 0x8c, 0xd7, 0x49, 0x62, 0x75, 0xbf, 0x3e, 0x7e, 0x6a, 0x6b, 0x8f, 0x9f, 0xda, 0xda, + 0x8b, 0xa7, 0x36, 0xfa, 
0x61, 0x69, 0xa3, 0x3f, 0x97, 0x36, 0x7a, 0xb4, 0xb4, 0xd1, 0xf1, 0xd2, + 0x46, 0x7f, 0x2f, 0x6d, 0xf4, 0x7c, 0x69, 0x6b, 0x2f, 0x96, 0x36, 0xfa, 0xe5, 0x99, 0xad, 0x1d, + 0x3f, 0xb3, 0xb5, 0xc7, 0xcf, 0x6c, 0xed, 0xdb, 0xec, 0x9f, 0xdb, 0xbd, 0x92, 0xfa, 0xab, 0xf6, + 0xf9, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x4e, 0x10, 0xfc, 0x83, 0xda, 0x09, 0x00, 0x00, } func (x WriteRequest_SourceEnum) String() string { @@ -1164,6 +1192,15 @@ func (this *WriteResponse) Equal(that interface{}) bool { if this.Message != that1.Message { return false } + if this.Samples != that1.Samples { + return false + } + if this.Histograms != that1.Histograms { + return false + } + if this.Exemplars != that1.Exemplars { + return false + } return true } func (this *TimeSeries) Equal(that interface{}) bool { @@ -1638,10 +1675,13 @@ func (this *WriteResponse) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 6) + s := make([]string, 0, 9) s = append(s, "&cortexpb.WriteResponse{") s = append(s, "Code: "+fmt.Sprintf("%#v", this.Code)+",\n") s = append(s, "Message: "+fmt.Sprintf("%#v", this.Message)+",\n") + s = append(s, "Samples: "+fmt.Sprintf("%#v", this.Samples)+",\n") + s = append(s, "Histograms: "+fmt.Sprintf("%#v", this.Histograms)+",\n") + s = append(s, "Exemplars: "+fmt.Sprintf("%#v", this.Exemplars)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -1999,6 +2039,21 @@ func (m *WriteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Exemplars != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.Exemplars)) + i-- + dAtA[i] = 0x28 + } + if m.Histograms != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.Histograms)) + i-- + dAtA[i] = 0x20 + } + if m.Samples != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.Samples)) + i-- + dAtA[i] = 0x18 + } if len(m.Message) > 0 { i -= len(m.Message) copy(dAtA[i:], m.Message) @@ -2612,6 +2667,15 @@ func (m *WriteResponse) Size() (n int) { if l > 0 { n += 1 + l + sovCortex(uint64(l)) } + if m.Samples != 0 { + n += 1 + sovCortex(uint64(m.Samples)) + } + if m.Histograms != 0 { + n += 1 + sovCortex(uint64(m.Histograms)) + } + if m.Exemplars != 0 { + n += 1 + sovCortex(uint64(m.Exemplars)) + } return n } @@ -2906,6 +2970,9 @@ func (this *WriteResponse) String() string { s := strings.Join([]string{`&WriteResponse{`, `Code:` + fmt.Sprintf("%v", this.Code) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Samples:` + fmt.Sprintf("%v", this.Samples) + `,`, + `Histograms:` + fmt.Sprintf("%v", this.Histograms) + `,`, + `Exemplars:` + fmt.Sprintf("%v", this.Exemplars) + `,`, `}`, }, "") return s @@ -3566,6 +3633,63 @@ func (m *WriteResponse) Unmarshal(dAtA []byte) error { } m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) + } + m.Samples = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Samples |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType) + } + m.Histograms = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Histograms |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + } + m.Exemplars = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Exemplars |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipCortex(dAtA[iNdEx:]) diff --git a/pkg/cortexpb/cortex.proto b/pkg/cortexpb/cortex.proto index f2995afbf2..fa2caf287c 100644 --- a/pkg/cortexpb/cortex.proto +++ b/pkg/cortexpb/cortex.proto @@ -36,6 +36,12 @@ message StreamWriteRequest { message WriteResponse { int32 code = 1; string message = 2; + // Samples represents X-Prometheus-Remote-Write-Written-Samples + int64 Samples = 3; + // Histograms represents X-Prometheus-Remote-Write-Written-Histograms + int64 Histograms = 4; + // Exemplars represents X-Prometheus-Remote-Write-Written-Exemplars + int64 Exemplars = 5; } message TimeSeries { diff --git a/pkg/cortexpb/extensions.go b/pkg/cortexpb/extensions.go index 716fafcc79..e75b45e2ae 100644 --- a/pkg/cortexpb/extensions.go +++ b/pkg/cortexpb/extensions.go @@ -15,7 +15,7 @@ const maxBufferSize = 1024 const signVersion = "v1" var signerPool = sync.Pool{ - New: func() interface{} { + New: func() any { return newSigner() }, } diff --git a/pkg/cortexpb/extensions_test.go b/pkg/cortexpb/extensions_test.go index 94a5f76d48..158d67e929 100644 --- a/pkg/cortexpb/extensions_test.go +++ b/pkg/cortexpb/extensions_test.go @@ -26,9 +26,8 @@ func BenchmarkSignRequest(b *testing.B) { for _, tc := range tests { b.Run(fmt.Sprintf("WriteRequestSize: %v", tc.size), func(b *testing.B) { wr := createWriteRequest(tc.size, true, "family1", "help1", "unit") - b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { _, err := wr.Sign(ctx) require.NoError(b, err) } @@ -72,7 +71,7 @@ func TestWriteRequest_Sign(t *testing.T) { itNumber := 1000 wg := sync.WaitGroup{} wg.Add(itNumber) - for i := 0; i < itNumber; i++ { + for range itNumber { go func() { defer wg.Done() s, err := tc.w.Sign(ctx) @@ -96,7 +95,7 @@ func createWriteRequest(numTs int, exemplar bool, family string, help string, un }, } - for i := 0; i < numTs; i++ { + for i := range numTs { w.Timeseries = append(w.Timeseries, PreallocTimeseries{ TimeSeries: &TimeSeries{ Labels: []LabelAdapter{ diff --git a/pkg/cortexpb/histograms.go b/pkg/cortexpb/histograms.go index 60e7207a19..aa13f27608 100644 --- a/pkg/cortexpb/histograms.go +++ b/pkg/cortexpb/histograms.go @@ -16,6 +16,7 @@ package cortexpb import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/prompb" + writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" ) func (h Histogram) IsFloatHistogram() bool { @@ -23,6 +24,30 @@ func (h Histogram) IsFloatHistogram() bool { return ok } +func HistogramWriteV2ProtoToHistogramProto(h writev2.Histogram) Histogram { + ph := Histogram{ + Sum: h.Sum, + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + NegativeSpans: spansWriteV2ProtoToSpansProto(h.NegativeSpans), + NegativeDeltas: h.NegativeDeltas, + NegativeCounts: h.NegativeCounts, + PositiveSpans: spansWriteV2ProtoToSpansProto(h.PositiveSpans), + PositiveDeltas: h.PositiveDeltas, + PositiveCounts: h.PositiveCounts, + ResetHint: Histogram_ResetHint(h.ResetHint), + TimestampMs: h.Timestamp, + } + if h.IsFloatHistogram() { + ph.Count = &Histogram_CountFloat{CountFloat: h.GetCountFloat()} + ph.ZeroCount = &Histogram_ZeroCountFloat{ZeroCountFloat: 
h.GetZeroCountFloat()} + } else { + ph.Count = &Histogram_CountInt{CountInt: h.GetCountInt()} + ph.ZeroCount = &Histogram_ZeroCountInt{ZeroCountInt: h.GetZeroCountInt()} + } + return ph +} + // HistogramPromProtoToHistogramProto converts a prometheus protobuf Histogram to cortex protobuf Histogram. func HistogramPromProtoToHistogramProto(h prompb.Histogram) Histogram { ph := Histogram{ @@ -131,7 +156,7 @@ func FloatHistogramToHistogramProto(timestamp int64, fh *histogram.FloatHistogra func spansProtoToSpans(s []BucketSpan) []histogram.Span { spans := make([]histogram.Span, len(s)) - for i := 0; i < len(s); i++ { + for i := range s { spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length} } @@ -140,7 +165,7 @@ func spansProtoToSpans(s []BucketSpan) []histogram.Span { func spansToSpansProto(s []histogram.Span) []BucketSpan { spans := make([]BucketSpan, len(s)) - for i := 0; i < len(s); i++ { + for i := range s { spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length} } @@ -149,7 +174,16 @@ func spansToSpansProto(s []histogram.Span) []BucketSpan { func spansPromProtoToSpansProto(s []prompb.BucketSpan) []BucketSpan { spans := make([]BucketSpan, len(s)) - for i := 0; i < len(s); i++ { + for i := range s { + spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length} + } + + return spans +} + +func spansWriteV2ProtoToSpansProto(s []writev2.BucketSpan) []BucketSpan { + spans := make([]BucketSpan, len(s)) + for i := range s { spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length} } diff --git a/pkg/cortexpb/signature.go b/pkg/cortexpb/signature.go index 42343e6f4c..a11c5bcd02 100644 --- a/pkg/cortexpb/signature.go +++ b/pkg/cortexpb/signature.go @@ -9,7 +9,7 @@ import ( // Ref: https://github.com/prometheus/common/blob/main/model/fnv.go func LabelsToFingerprint(lset labels.Labels) model.Fingerprint { - if len(lset) == 0 { + if lset.Len() == 0 { return model.Fingerprint(hashNew()) } diff --git a/pkg/cortexpb/slicesPool.go b/pkg/cortexpb/slicesPool.go index e28d51d4f2..c0f3a2c7c3 100644 --- a/pkg/cortexpb/slicesPool.go +++ b/pkg/cortexpb/slicesPool.go @@ -21,10 +21,10 @@ func newSlicePool(pools int) *byteSlicePools { func (sp *byteSlicePools) init(pools int) { sp.pools = make([]sync.Pool, pools) - for i := 0; i < pools; i++ { + for i := range pools { size := int(math.Pow(2, float64(i+minPoolSizePower))) sp.pools[i] = sync.Pool{ - New: func() interface{} { + New: func() any { buf := make([]byte, 0, size) return &buf }, diff --git a/pkg/cortexpb/slicesPool_test.go b/pkg/cortexpb/slicesPool_test.go index 9bc56cdec3..d5f3f0a1c6 100644 --- a/pkg/cortexpb/slicesPool_test.go +++ b/pkg/cortexpb/slicesPool_test.go @@ -12,7 +12,7 @@ func TestFuzzyByteSlicePools(t *testing.T) { sut := newSlicePool(20) maxByteSize := int(math.Pow(2, 20+minPoolSizePower-1)) - for i := 0; i < 1000; i++ { + for range 1000 { size := rand.Int() % maxByteSize s := sut.getSlice(size) assert.Equal(t, len(*s), size) diff --git a/pkg/cortexpb/timeseries.go b/pkg/cortexpb/timeseries.go index db7354ffe4..4d780bba6a 100644 --- a/pkg/cortexpb/timeseries.go +++ b/pkg/cortexpb/timeseries.go @@ -24,13 +24,13 @@ var ( is re-used. But since the slices are far far larger, we come out ahead. 
*/ slicePool = sync.Pool{ - New: func() interface{} { + New: func() any { return make([]PreallocTimeseries, 0, expectedTimeseries) }, } timeSeriesPool = sync.Pool{ - New: func() interface{} { + New: func() any { return &TimeSeries{ Labels: make([]LabelAdapter, 0, expectedLabels), Samples: make([]Sample, 0, expectedSamplesPerSeries), @@ -41,7 +41,7 @@ var ( } writeRequestPool = sync.Pool{ - New: func() interface{} { + New: func() any { return &PreallocWriteRequest{ WriteRequest: WriteRequest{}, } diff --git a/pkg/cortexpb/timeseries_test.go b/pkg/cortexpb/timeseries_test.go index abba35a88d..6194b7e994 100644 --- a/pkg/cortexpb/timeseries_test.go +++ b/pkg/cortexpb/timeseries_test.go @@ -70,7 +70,7 @@ func TestTimeseriesFromPool(t *testing.T) { func BenchmarkMarshallWriteRequest(b *testing.B) { ts := PreallocTimeseriesSliceFromPool() - for i := 0; i < 100; i++ { + for i := range 100 { ts = append(ts, PreallocTimeseries{TimeSeries: TimeseriesFromPool()}) ts[i].Labels = []LabelAdapter{ {Name: "foo", Value: "bar"}, @@ -85,14 +85,14 @@ func BenchmarkMarshallWriteRequest(b *testing.B) { tests := []struct { name string writeRequestFactory func() proto.Marshaler - clean func(in interface{}) + clean func(in any) }{ { name: "no-pool", writeRequestFactory: func() proto.Marshaler { return &WriteRequest{Timeseries: ts} }, - clean: func(in interface{}) {}, + clean: func(in any) {}, }, { name: "byte pool", @@ -101,7 +101,7 @@ func BenchmarkMarshallWriteRequest(b *testing.B) { w.Timeseries = ts return w }, - clean: func(in interface{}) { + clean: func(in any) { ReuseWriteRequest(in.(*PreallocWriteRequest)) }, }, @@ -112,7 +112,7 @@ func BenchmarkMarshallWriteRequest(b *testing.B) { w.Timeseries = ts return w }, - clean: func(in interface{}) { + clean: func(in any) { ReuseWriteRequest(in.(*PreallocWriteRequest)) }, }, @@ -120,7 +120,7 @@ func BenchmarkMarshallWriteRequest(b *testing.B) { for _, tc := range tests { b.Run(tc.name, func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { w := tc.writeRequestFactory() _, err := w.Marshal() require.NoError(b, err) diff --git a/pkg/distributed_execution/codec.go b/pkg/distributed_execution/codec.go new file mode 100644 index 0000000000..b4d0ce32f7 --- /dev/null +++ b/pkg/distributed_execution/codec.go @@ -0,0 +1,196 @@ +package distributed_execution + +import ( + "bytes" + "encoding/json" + "math" + + "github.com/prometheus/prometheus/model/labels" + "github.com/thanos-io/promql-engine/logicalplan" +) + +type jsonNode struct { + Type logicalplan.NodeType `json:"type"` + Data json.RawMessage `json:"data"` + Children []json.RawMessage `json:"children,omitempty"` +} + +const ( + nanVal = `"NaN"` + infVal = `"+Inf"` + negInfVal = `"-Inf"` +) + +// Unmarshal deserializes a logical plan node from JSON data. +// This is a custom implementation for Cortex that is copied from Thanos engine's unmarshaling func +// to support remote nodes. We maintain this separate implementation because Thanos engine's +// logical plan codec currently doesn't support custom node types in its unmarshaling process. 
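A round-trip sketch of how this codec is meant to be used, mirroring the test in codec_test.go (error handling shortened):

// Serialize with the engine's marshaller, decode with the Cortex codec:
data, err := logicalplan.Marshal(plan.Root())
if err != nil {
	return err
}
node, err := Unmarshal(data) // reconstructs Remote nodes as well
if err != nil {
	return err
}
_ = node // structurally mirrors plan.Root()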
+func Unmarshal(data []byte) (logicalplan.Node, error) { + return unmarshalNode(data) +} + +func unmarshalNode(data []byte) (logicalplan.Node, error) { + t := jsonNode{} + if err := json.Unmarshal(data, &t); err != nil { + return nil, err + } + + switch t.Type { + case logicalplan.VectorSelectorNode: + v := &logicalplan.VectorSelector{} + if err := json.Unmarshal(t.Data, v); err != nil { + return nil, err + } + var err error + for i, m := range v.LabelMatchers { + v.LabelMatchers[i], err = labels.NewMatcher(m.Type, m.Name, m.Value) + if err != nil { + return nil, err + } + } + return v, nil + case logicalplan.MatrixSelectorNode: + m := &logicalplan.MatrixSelector{} + if err := json.Unmarshal(t.Data, m); err != nil { + return nil, err + } + vs, err := unmarshalNode(t.Children[0]) + if err != nil { + return nil, err + } + m.VectorSelector = vs.(*logicalplan.VectorSelector) + return m, nil + case logicalplan.AggregationNode: + a := &logicalplan.Aggregation{} + if err := json.Unmarshal(t.Data, a); err != nil { + return nil, err + } + var err error + a.Expr, err = unmarshalNode(t.Children[0]) + if err != nil { + return nil, err + } + if len(t.Children) > 1 { + a.Param, err = unmarshalNode(t.Children[1]) + if err != nil { + return nil, err + } + } + return a, nil + case logicalplan.BinaryNode: + b := &logicalplan.Binary{} + if err := json.Unmarshal(t.Data, b); err != nil { + return nil, err + } + var err error + b.LHS, err = unmarshalNode(t.Children[0]) + if err != nil { + return nil, err + } + b.RHS, err = unmarshalNode(t.Children[1]) + if err != nil { + return nil, err + } + return b, nil + case logicalplan.FunctionNode: + f := &logicalplan.FunctionCall{} + if err := json.Unmarshal(t.Data, f); err != nil { + return nil, err + } + for _, c := range t.Children { + child, err := unmarshalNode(c) + if err != nil { + return nil, err + } + f.Args = append(f.Args, child) + } + return f, nil + case logicalplan.NumberLiteralNode: + n := &logicalplan.NumberLiteral{} + if bytes.Equal(t.Data, []byte(infVal)) { + n.Val = math.Inf(1) + } else if bytes.Equal(t.Data, []byte(negInfVal)) { + n.Val = math.Inf(-1) + } else if bytes.Equal(t.Data, []byte(nanVal)) { + n.Val = math.NaN() + } else { + if err := json.Unmarshal(t.Data, n); err != nil { + return nil, err + } + } + return n, nil + case logicalplan.StringLiteralNode: + s := &logicalplan.StringLiteral{} + if err := json.Unmarshal(t.Data, s); err != nil { + return nil, err + } + return s, nil + case logicalplan.SubqueryNode: + s := &logicalplan.Subquery{} + if err := json.Unmarshal(t.Data, s); err != nil { + return nil, err + } + var err error + s.Expr, err = unmarshalNode(t.Children[0]) + if err != nil { + return nil, err + } + return s, nil + case logicalplan.CheckDuplicateNode: + c := &logicalplan.CheckDuplicateLabels{} + if err := json.Unmarshal(t.Data, c); err != nil { + return nil, err + } + var err error + c.Expr, err = unmarshalNode(t.Children[0]) + if err != nil { + return nil, err + } + return c, nil + case logicalplan.StepInvariantNode: + s := &logicalplan.StepInvariantExpr{} + if err := json.Unmarshal(t.Data, s); err != nil { + return nil, err + } + var err error + s.Expr, err = unmarshalNode(t.Children[0]) + if err != nil { + return nil, err + } + return s, nil + case logicalplan.ParensNode: + p := &logicalplan.Parens{} + if err := json.Unmarshal(t.Data, p); err != nil { + return nil, err + } + var err error + p.Expr, err = unmarshalNode(t.Children[0]) + if err != nil { + return nil, err + } + return p, nil + case logicalplan.UnaryNode: + u := 
&logicalplan.Unary{} + if err := json.Unmarshal(t.Data, u); err != nil { + return nil, err + } + var err error + u.Expr, err = unmarshalNode(t.Children[0]) + if err != nil { + return nil, err + } + return u, nil + case RemoteNode: + r := &Remote{} + if err := json.Unmarshal(t.Data, r); err != nil { + return nil, err + } + var err error + r.Expr, err = unmarshalNode(t.Children[0]) + if err != nil { + return nil, err + } + return r, nil + } + return nil, nil +} diff --git a/pkg/distributed_execution/codec_test.go b/pkg/distributed_execution/codec_test.go new file mode 100644 index 0000000000..89fdd30f91 --- /dev/null +++ b/pkg/distributed_execution/codec_test.go @@ -0,0 +1,70 @@ +package distributed_execution + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/thanos-io/promql-engine/logicalplan" +) + +func TestUnmarshalWithLogicalPlan(t *testing.T) { + t.Run("unmarshal complex query plan", func(t *testing.T) { + start := time.Now() + end := start.Add(1 * time.Hour) + step := 15 * time.Second + + testCases := []struct { + name string + query string + }{ + { + name: "binary operation", + query: "http_requests_total + rate(node_cpu_seconds_total[5m])", + }, + { + name: "aggregation", + query: "sum(rate(http_requests_total[5m])) by (job)", + }, + { + name: "complex query", + query: "sum(rate(http_requests_total{job='prometheus'}[5m])) by (job) / sum(rate(node_cpu_seconds_total[5m])) by (job)", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + plan, _, err := CreateTestLogicalPlan(tc.query, start, end, step) + require.NoError(t, err) + require.NotNil(t, plan) + + data, err := logicalplan.Marshal((*plan).Root()) + require.NoError(t, err) + + node, err := Unmarshal(data) + require.NoError(t, err) + require.NotNil(t, node) + + // the logical plan node before and after marshal/unmarshal should be the same + verifyNodeStructure(t, (*plan).Root(), node) + }) + } + }) +} + +func verifyNodeStructure(t *testing.T, expected logicalplan.Node, actual logicalplan.Node) { + require.Equal(t, expected.Type(), actual.Type()) + require.Equal(t, expected.String(), actual.String()) + require.Equal(t, expected.ReturnType(), actual.ReturnType()) + + expectedChildren := expected.Children() + actualChildren := actual.Children() + + require.Equal(t, len(expectedChildren), len(actualChildren)) + + for i := range expectedChildren { + if expectedChildren[i] != nil && actualChildren[i] != nil { + verifyNodeStructure(t, *expectedChildren[i], *actualChildren[i]) + } + } +} diff --git a/pkg/distributed_execution/distributed_optimizer.go b/pkg/distributed_execution/distributed_optimizer.go new file mode 100644 index 0000000000..4d0fdbe1d5 --- /dev/null +++ b/pkg/distributed_execution/distributed_optimizer.go @@ -0,0 +1,49 @@ +package distributed_execution + +import ( + "github.com/thanos-io/promql-engine/query" + + "github.com/prometheus/prometheus/util/annotations" + "github.com/thanos-io/promql-engine/logicalplan" +) + +// This is a simplified implementation that only handles binary aggregation cases +// Future versions of the distributed optimizer are expected to: +// - Support more complex query patterns +// - Incorporate diverse optimization strategies +// - Extend support to node types beyond binary operations + +type DistributedOptimizer struct{} + +func (d *DistributedOptimizer) Optimize(root logicalplan.Node, opts *query.Options) (logicalplan.Node, annotations.Annotations) { + warns := annotations.New() + + logicalplan.TraverseBottomUp(nil, &root, 
func(parent, current *logicalplan.Node) bool { + + if (*current).Type() == logicalplan.BinaryNode && d.hasAggregation(current) { + ch := (*current).Children() + + for _, child := range ch { + temp := (*child).Clone() + *child = NewRemoteNode(temp) + *(*child).Children()[0] = temp + } + } + + return false + }) + + return root, *warns +} + +func (d *DistributedOptimizer) hasAggregation(root *logicalplan.Node) bool { + isAggr := false + logicalplan.TraverseBottomUp(nil, root, func(parent, current *logicalplan.Node) bool { + if (*current).Type() == logicalplan.AggregationNode { + isAggr = true + return true + } + return false + }) + return isAggr +} diff --git a/pkg/distributed_execution/distributed_optimizer_test.go b/pkg/distributed_execution/distributed_optimizer_test.go new file mode 100644 index 0000000000..73e818cc6a --- /dev/null +++ b/pkg/distributed_execution/distributed_optimizer_test.go @@ -0,0 +1,123 @@ +package distributed_execution + +import ( + "testing" + "time" + + "github.com/prometheus/prometheus/promql/parser" + "github.com/stretchr/testify/require" + "github.com/thanos-io/promql-engine/logicalplan" + "github.com/thanos-io/promql-engine/query" +) + +func TestDistributedOptimizer(t *testing.T) { + now := time.Now() + testCases := []struct { + name string + query string + remoteExecCount int + expectedResult string + }{ + { + name: "binary operation with aggregations", + query: "sum(rate(node_cpu_seconds_total{mode!=\"idle\"}[5m])) + sum(rate(node_memory_Active_bytes[5m]))", + remoteExecCount: 2, + expectedResult: "remote(sum(rate(node_cpu_seconds_total{mode!=\"idle\"}[5m]))) + remote(sum(rate(node_memory_Active_bytes[5m])))", + }, + { + name: "binary operation with aggregations 2", + query: "count(node_cpu_seconds_total{mode!=\"idle\"}) + count(node_memory_Active_bytes)", + remoteExecCount: 2, + expectedResult: "remote(count(node_cpu_seconds_total{mode!=\"idle\"})) + remote(count(node_memory_Active_bytes))", + }, + { + name: "multiple binary operations with aggregations", + query: "sum(rate(http_requests_total{job=\"api\"}[5m])) + sum(rate(http_requests_total{job=\"web\"}[5m])) - sum(rate(http_requests_total{job=\"cache\"}[5m]))", + remoteExecCount: 4, + expectedResult: "remote(remote(sum(rate(http_requests_total{job=\"api\"}[5m]))) + remote(sum(rate(http_requests_total{job=\"web\"}[5m])))) - remote(sum(rate(http_requests_total{job=\"cache\"}[5m])))", + }, + { + name: "subquery with aggregation", + query: "sum(rate(container_network_transmit_bytes_total[5m:1m]))", + remoteExecCount: 0, + expectedResult: "sum(rate(container_network_transmit_bytes_total[5m:1m]))", + }, + { + name: "numerical binary query", + query: "(1 + 1) + (1 + 1)", + remoteExecCount: 0, + expectedResult: "4", + }, + { + name: "binary non-aggregation query", + query: "up + up", + remoteExecCount: 0, + expectedResult: "up + up", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + lp, _, err := CreateTestLogicalPlan(tc.query, now, now, time.Minute) + require.NoError(t, err) + + node := (*lp).Root() + + remoteNodeCount := 0 + logicalplan.TraverseBottomUp(nil, &node, func(parent, current *logicalplan.Node) bool { + if RemoteNode == (*current).Type() { + remoteNodeCount++ + } + return false + }) + require.Equal(t, tc.remoteExecCount, remoteNodeCount) + require.Equal(t, (*lp).Root().String(), tc.expectedResult) + }) + } +} + +func getStartAndEnd(start time.Time, end time.Time, step time.Duration) (time.Time, time.Time) { + if step == 0 { + return start, start + } + return start, 
end +} + +func CreateTestLogicalPlan(qs string, start time.Time, end time.Time, step time.Duration) (*logicalplan.Plan, query.Options, error) { + + start, end = getStartAndEnd(start, end, step) + + qOpts := query.Options{ + Start: start, + End: end, + Step: step, + StepsBatch: 10, + NoStepSubqueryIntervalFn: func(duration time.Duration) time.Duration { + return 0 + }, + LookbackDelta: 0, + EnablePerStepStats: false, + } + + expr, err := parser.NewParser(qs, parser.WithFunctions(parser.Functions)).ParseExpr() + if err != nil { + return nil, qOpts, err + } + + planOpts := logicalplan.PlanOptions{ + DisableDuplicateLabelCheck: false, + } + + logicalPlan, err := logicalplan.NewFromAST(expr, &qOpts, planOpts) + if err != nil { + return nil, qOpts, err + } + optimizedPlan, _ := logicalPlan.Optimize(logicalplan.DefaultOptimizers) + + distributedOptimizer := DistributedOptimizer{} + dOptimizedNode, _ := distributedOptimizer.Optimize(optimizedPlan.Root(), &qOpts) + + plan := logicalplan.New(dOptimizedNode, &qOpts, planOpts) + + return &plan, qOpts, nil +} diff --git a/pkg/distributed_execution/fragment_key.go b/pkg/distributed_execution/fragment_key.go new file mode 100644 index 0000000000..19ab0bb0bc --- /dev/null +++ b/pkg/distributed_execution/fragment_key.go @@ -0,0 +1,32 @@ +package distributed_execution + +// FragmentKey uniquely identifies a fragment of a distributed logical query plan. +// It combines a queryID (to identify the overall query) and a fragmentID +// (to identify the specific fragment within that query). +type FragmentKey struct { + // queryID identifies the distributed query this fragment belongs to + queryID uint64 + // fragmentID identifies this specific fragment within the query + fragmentID uint64 +} + +// MakeFragmentKey creates a new FragmentKey with the given queryID and fragmentID. +// It's used to track and identify fragments during distributed query execution. +func MakeFragmentKey(queryID uint64, fragmentID uint64) FragmentKey { + return FragmentKey{ + queryID: queryID, + fragmentID: fragmentID, + } +} + +// GetQueryID returns the queryID for the current key +// This ID is shared across all fragments of the same distributed query. +func (f FragmentKey) GetQueryID() uint64 { + return f.queryID +} + +// GetFragmentID returns the ID for this specific fragment +// within its parent query. 
+func (f FragmentKey) GetFragmentID() uint64 { + return f.fragmentID +} diff --git a/pkg/distributed_execution/plan_fragments/fragmenter.go b/pkg/distributed_execution/plan_fragments/fragmenter.go new file mode 100644 index 0000000000..d2bd187dfa --- /dev/null +++ b/pkg/distributed_execution/plan_fragments/fragmenter.go @@ -0,0 +1,52 @@ +package plan_fragments + +import "github.com/thanos-io/promql-engine/logicalplan" + +// Fragmenter interface +type Fragmenter interface { + // Fragment splits the logical query plan into fragments, which are always returned in child-to-root order; + // in other words, fragments appear in the returned slice in the order in which they will be scheduled + Fragment(node logicalplan.Node) ([]Fragment, error) +} + +type DummyFragmenter struct { +} + +func (f *DummyFragmenter) Fragment(node logicalplan.Node) ([]Fragment, error) { + // simple logic without distributed optimizer + return []Fragment{ + { + Node: node, + FragmentID: uint64(1), + ChildIDs: []uint64{}, + IsRoot: true, + }, + }, nil +} + +type Fragment struct { + Node logicalplan.Node + FragmentID uint64 + ChildIDs []uint64 + IsRoot bool +} + +func (s *Fragment) IsEmpty() bool { + if s.Node != nil { + return false + } + if s.FragmentID != 0 { + return false + } + if s.IsRoot { + return false + } + if len(s.ChildIDs) != 0 { + return false + } + return true +} + +func NewDummyFragmenter() Fragmenter { + return &DummyFragmenter{} +} diff --git a/pkg/distributed_execution/plan_fragments/fragmenter_test.go b/pkg/distributed_execution/plan_fragments/fragmenter_test.go new file mode 100644 index 0000000000..65f4c20022 --- /dev/null +++ b/pkg/distributed_execution/plan_fragments/fragmenter_test.go @@ -0,0 +1,46 @@ +package plan_fragments + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/cortexproject/cortex/pkg/util/logical_plan" +) + +func TestFragmenter(t *testing.T) { + type testCase struct { + name string + query string + start time.Time + end time.Time + expectedFragments int + } + + now := time.Now() + + // more tests will be added when distributed optimizer and fragmenter are implemented + tests := []testCase{ + { + name: "simple logical query plan - no fragmentation", + query: "up", + start: now, + end: now, + expectedFragments: 1, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + lp, err := logical_plan.CreateTestLogicalPlan(tc.query, tc.start, tc.end, 0) + require.NoError(t, err) + + fragmenter := NewDummyFragmenter() + res, err := fragmenter.Fragment((*lp).Root()) + + require.NoError(t, err) + require.Equal(t, tc.expectedFragments, len(res)) + }) + } +} diff --git a/pkg/distributed_execution/querierpb/querier.pb.go b/pkg/distributed_execution/querierpb/querier.pb.go new file mode 100644 index 0000000000..64038392ba --- /dev/null +++ b/pkg/distributed_execution/querierpb/querier.pb.go @@ -0,0 +1,2551 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: querier.proto + +package querierpb + +import ( + context "context" + encoding_binary "encoding/binary" + fmt "fmt" + cortexpb "github.com/cortexproject/cortex/pkg/cortexpb" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used.
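Returning to the Fragmenter contract above: because the returned slice is already in scheduling order, a caller can dispatch fragments with a plain loop. A sketch, where schedule is a hypothetical dispatch hook:

frags, err := NewDummyFragmenter().Fragment(root)
if err != nil {
	return err
}
for _, f := range frags { // children first, root last, per the interface contract
	schedule(f) // hypothetical; real scheduling lives in the query scheduler
}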
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// SeriesRequest contains parameters for series streaming +type SeriesRequest struct { + QueryID uint64 `protobuf:"varint,1,opt,name=queryID,proto3" json:"queryID,omitempty"` + FragmentID uint64 `protobuf:"varint,2,opt,name=fragmentID,proto3" json:"fragmentID,omitempty"` + Batchsize int64 `protobuf:"varint,3,opt,name=batchsize,proto3" json:"batchsize,omitempty"` +} + +func (m *SeriesRequest) Reset() { *m = SeriesRequest{} } +func (*SeriesRequest) ProtoMessage() {} +func (*SeriesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7edfe438abd6b96f, []int{0} +} +func (m *SeriesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeriesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SeriesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SeriesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeriesRequest.Merge(m, src) +} +func (m *SeriesRequest) XXX_Size() int { + return m.Size() +} +func (m *SeriesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SeriesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SeriesRequest proto.InternalMessageInfo + +func (m *SeriesRequest) GetQueryID() uint64 { + if m != nil { + return m.QueryID + } + return 0 +} + +func (m *SeriesRequest) GetFragmentID() uint64 { + if m != nil { + return m.FragmentID + } + return 0 +} + +func (m *SeriesRequest) GetBatchsize() int64 { + if m != nil { + return m.Batchsize + } + return 0 +} + +// NextRequest contains parameters for data streaming +type NextRequest struct { + QueryID uint64 `protobuf:"varint,1,opt,name=queryID,proto3" json:"queryID,omitempty"` + FragmentID uint64 `protobuf:"varint,2,opt,name=fragmentID,proto3" json:"fragmentID,omitempty"` + Batchsize int64 `protobuf:"varint,3,opt,name=batchsize,proto3" json:"batchsize,omitempty"` +} + +func (m *NextRequest) Reset() { *m = NextRequest{} } +func (*NextRequest) ProtoMessage() {} +func (*NextRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7edfe438abd6b96f, []int{1} +} +func (m *NextRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NextRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NextRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NextRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NextRequest.Merge(m, src) +} +func (m *NextRequest) XXX_Size() int { + return m.Size() +} +func (m *NextRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NextRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NextRequest proto.InternalMessageInfo + +func (m *NextRequest) GetQueryID() uint64 { + if m != nil { + return m.QueryID + } + return 0 +} + +func (m *NextRequest) GetFragmentID() uint64 { + if m != nil { + return m.FragmentID + } + return 0 +} + +func (m *NextRequest) GetBatchsize() int64 { + if m != nil { + 
return m.Batchsize + } + return 0 +} + +// SeriesBatch contains a collection of series metadata +type SeriesBatch struct { + OneSeries []*OneSeries `protobuf:"bytes,1,rep,name=OneSeries,proto3" json:"OneSeries,omitempty"` +} + +func (m *SeriesBatch) Reset() { *m = SeriesBatch{} } +func (*SeriesBatch) ProtoMessage() {} +func (*SeriesBatch) Descriptor() ([]byte, []int) { + return fileDescriptor_7edfe438abd6b96f, []int{2} +} +func (m *SeriesBatch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeriesBatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SeriesBatch.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SeriesBatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeriesBatch.Merge(m, src) +} +func (m *SeriesBatch) XXX_Size() int { + return m.Size() +} +func (m *SeriesBatch) XXX_DiscardUnknown() { + xxx_messageInfo_SeriesBatch.DiscardUnknown(m) +} + +var xxx_messageInfo_SeriesBatch proto.InternalMessageInfo + +func (m *SeriesBatch) GetOneSeries() []*OneSeries { + if m != nil { + return m.OneSeries + } + return nil +} + +// OneSeries represents a single time series with its labels +// Used to describe the shape of incoming data +type OneSeries struct { + Labels []*Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty"` +} + +func (m *OneSeries) Reset() { *m = OneSeries{} } +func (*OneSeries) ProtoMessage() {} +func (*OneSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_7edfe438abd6b96f, []int{3} +} +func (m *OneSeries) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OneSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_OneSeries.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *OneSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneSeries.Merge(m, src) +} +func (m *OneSeries) XXX_Size() int { + return m.Size() +} +func (m *OneSeries) XXX_DiscardUnknown() { + xxx_messageInfo_OneSeries.DiscardUnknown(m) +} + +var xxx_messageInfo_OneSeries proto.InternalMessageInfo + +func (m *OneSeries) GetLabels() []*Label { + if m != nil { + return m.Labels + } + return nil +} + +// StepVectorBatch contains a collection of step vectors +type StepVectorBatch struct { + StepVectors []*StepVector `protobuf:"bytes,1,rep,name=step_vectors,json=stepVectors,proto3" json:"step_vectors,omitempty"` +} + +func (m *StepVectorBatch) Reset() { *m = StepVectorBatch{} } +func (*StepVectorBatch) ProtoMessage() {} +func (*StepVectorBatch) Descriptor() ([]byte, []int) { + return fileDescriptor_7edfe438abd6b96f, []int{4} +} +func (m *StepVectorBatch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StepVectorBatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StepVectorBatch.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StepVectorBatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_StepVectorBatch.Merge(m, src) +} +func (m *StepVectorBatch) XXX_Size() int { + return m.Size() +} +func (m *StepVectorBatch) XXX_DiscardUnknown() { + 
xxx_messageInfo_StepVectorBatch.DiscardUnknown(m) +} + +var xxx_messageInfo_StepVectorBatch proto.InternalMessageInfo + +func (m *StepVectorBatch) GetStepVectors() []*StepVector { + if m != nil { + return m.StepVectors + } + return nil +} + +// StepVector represents data points at a specific timestamp +type StepVector struct { + T int64 `protobuf:"varint,1,opt,name=t,proto3" json:"t,omitempty"` + Sample_IDs []uint64 `protobuf:"varint,2,rep,packed,name=sample_IDs,json=sampleIDs,proto3" json:"sample_IDs,omitempty"` + Samples []float64 `protobuf:"fixed64,3,rep,packed,name=samples,proto3" json:"samples,omitempty"` + Histogram_IDs []uint64 `protobuf:"varint,4,rep,packed,name=histogram_IDs,json=histogramIDs,proto3" json:"histogram_IDs,omitempty"` + Histograms []cortexpb.Histogram `protobuf:"bytes,5,rep,name=histograms,proto3" json:"histograms"` +} + +func (m *StepVector) Reset() { *m = StepVector{} } +func (*StepVector) ProtoMessage() {} +func (*StepVector) Descriptor() ([]byte, []int) { + return fileDescriptor_7edfe438abd6b96f, []int{5} +} +func (m *StepVector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StepVector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StepVector.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StepVector) XXX_Merge(src proto.Message) { + xxx_messageInfo_StepVector.Merge(m, src) +} +func (m *StepVector) XXX_Size() int { + return m.Size() +} +func (m *StepVector) XXX_DiscardUnknown() { + xxx_messageInfo_StepVector.DiscardUnknown(m) +} + +var xxx_messageInfo_StepVector proto.InternalMessageInfo + +func (m *StepVector) GetT() int64 { + if m != nil { + return m.T + } + return 0 +} + +func (m *StepVector) GetSample_IDs() []uint64 { + if m != nil { + return m.Sample_IDs + } + return nil +} + +func (m *StepVector) GetSamples() []float64 { + if m != nil { + return m.Samples + } + return nil +} + +func (m *StepVector) GetHistogram_IDs() []uint64 { + if m != nil { + return m.Histogram_IDs + } + return nil +} + +func (m *StepVector) GetHistograms() []cortexpb.Histogram { + if m != nil { + return m.Histograms + } + return nil +} + +// Label represents a key-value pair for series metadata +type Label struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Label) Reset() { *m = Label{} } +func (*Label) ProtoMessage() {} +func (*Label) Descriptor() ([]byte, []int) { + return fileDescriptor_7edfe438abd6b96f, []int{6} +} +func (m *Label) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Label) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Label.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Label) XXX_Merge(src proto.Message) { + xxx_messageInfo_Label.Merge(m, src) +} +func (m *Label) XXX_Size() int { + return m.Size() +} +func (m *Label) XXX_DiscardUnknown() { + xxx_messageInfo_Label.DiscardUnknown(m) +} + +var xxx_messageInfo_Label proto.InternalMessageInfo + +func (m *Label) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Label) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func init() { + 
proto.RegisterType((*SeriesRequest)(nil), "querierpb.SeriesRequest") + proto.RegisterType((*NextRequest)(nil), "querierpb.NextRequest") + proto.RegisterType((*SeriesBatch)(nil), "querierpb.SeriesBatch") + proto.RegisterType((*OneSeries)(nil), "querierpb.OneSeries") + proto.RegisterType((*StepVectorBatch)(nil), "querierpb.StepVectorBatch") + proto.RegisterType((*StepVector)(nil), "querierpb.StepVector") + proto.RegisterType((*Label)(nil), "querierpb.Label") +} + +func init() { proto.RegisterFile("querier.proto", fileDescriptor_7edfe438abd6b96f) } + +var fileDescriptor_7edfe438abd6b96f = []byte{ + // 509 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x53, 0x3d, 0x8f, 0xd3, 0x40, + 0x10, 0xf5, 0x9e, 0x9d, 0x9c, 0x3c, 0x49, 0x04, 0x5a, 0x02, 0xb2, 0x2c, 0x58, 0x2c, 0xd3, 0xb8, + 0x72, 0x20, 0x08, 0x89, 0x6b, 0x0e, 0x11, 0xa5, 0x20, 0x02, 0x81, 0xd8, 0x93, 0x28, 0x68, 0x4e, + 0xb6, 0xb5, 0x38, 0x81, 0x38, 0xf6, 0x79, 0x37, 0xa7, 0x83, 0x8a, 0x96, 0x8e, 0x9f, 0x41, 0xcf, + 0x9f, 0xb8, 0x32, 0xe5, 0x55, 0x88, 0x38, 0x0d, 0x65, 0x7e, 0x02, 0xf2, 0xfa, 0x53, 0x82, 0xf6, + 0xba, 0x79, 0xef, 0xcd, 0xdb, 0x59, 0x3f, 0xcf, 0xc2, 0xe0, 0x6c, 0xcd, 0xd2, 0x05, 0x4b, 0xdd, + 0x24, 0x8d, 0x45, 0x8c, 0xf5, 0x12, 0x26, 0xbe, 0x39, 0x0c, 0xe3, 0x30, 0x96, 0xec, 0x28, 0xaf, + 0x8a, 0x06, 0xf3, 0x28, 0x5c, 0x88, 0xf9, 0xda, 0x77, 0x83, 0x38, 0x1a, 0x05, 0x71, 0x2a, 0xd8, + 0x45, 0x92, 0xc6, 0x1f, 0x59, 0x20, 0x4a, 0x34, 0x4a, 0x3e, 0x85, 0x95, 0xe0, 0x97, 0x45, 0x61, + 0xb5, 0x43, 0x18, 0x9c, 0xe4, 0x87, 0x73, 0xca, 0xce, 0xd6, 0x8c, 0x0b, 0x6c, 0xc0, 0x61, 0x3e, + 0xee, 0xf3, 0x6c, 0x6a, 0x20, 0x0b, 0x39, 0x1a, 0xad, 0x20, 0x26, 0x00, 0x1f, 0x52, 0x2f, 0x8c, + 0xd8, 0x4a, 0xcc, 0xa6, 0xc6, 0x81, 0x14, 0x5b, 0x0c, 0xbe, 0x0b, 0xba, 0xef, 0x89, 0x60, 0xce, + 0x17, 0x5f, 0x98, 0xa1, 0x5a, 0xc8, 0x51, 0x69, 0x43, 0xd8, 0x0c, 0x7a, 0xaf, 0xd9, 0x85, 0xb8, + 0xee, 0x31, 0xcf, 0xa1, 0x57, 0x7c, 0xcf, 0x24, 0xa7, 0xf0, 0x18, 0xf4, 0x37, 0x2b, 0x56, 0x30, + 0x06, 0xb2, 0x54, 0xa7, 0x37, 0x1e, 0xba, 0x75, 0x9c, 0x6e, 0xad, 0xd1, 0xa6, 0xcd, 0x7e, 0xd2, + 0xf2, 0x60, 0x07, 0xba, 0x4b, 0xcf, 0x67, 0xcb, 0xca, 0x7d, 0xb3, 0xe5, 0x7e, 0x95, 0x0b, 0xb4, + 0xd4, 0xed, 0x97, 0x70, 0xe3, 0x44, 0xb0, 0xe4, 0x1d, 0x0b, 0x44, 0x9c, 0x16, 0xd3, 0x9f, 0x42, + 0x9f, 0x0b, 0x96, 0x9c, 0x9e, 0x4b, 0xae, 0x3a, 0xe2, 0x76, 0xeb, 0x88, 0xc6, 0x41, 0x7b, 0xbc, + 0xae, 0xb9, 0xfd, 0x13, 0x01, 0x34, 0x1a, 0xee, 0x03, 0x12, 0x32, 0x27, 0x95, 0x22, 0x81, 0xef, + 0x01, 0x70, 0x2f, 0x4a, 0x96, 0xec, 0x74, 0x36, 0xe5, 0xc6, 0x81, 0xa5, 0x3a, 0x1a, 0xd5, 0x0b, + 0x66, 0x36, 0xe5, 0x79, 0xb4, 0x05, 0xe0, 0x86, 0x6a, 0xa9, 0x0e, 0xa2, 0x15, 0xc4, 0x0f, 0x60, + 0x30, 0x5f, 0x70, 0x11, 0x87, 0xa9, 0x17, 0x49, 0xaf, 0x26, 0xbd, 0xfd, 0x9a, 0xcc, 0xed, 0x47, + 0x00, 0x35, 0xe6, 0x46, 0x47, 0x5e, 0xf9, 0x96, 0x5b, 0x6d, 0x8f, 0xfb, 0xa2, 0xd2, 0x26, 0xda, + 0xe5, 0xaf, 0xfb, 0x0a, 0x6d, 0x35, 0xdb, 0x8f, 0xa0, 0x23, 0x33, 0xc1, 0x18, 0xb4, 0x95, 0x17, + 0x31, 0x79, 0x65, 0x9d, 0xca, 0x1a, 0x0f, 0xa1, 0x73, 0xee, 0x2d, 0xd7, 0x4c, 0xfe, 0x52, 0x9d, + 0x16, 0x60, 0xfc, 0x0d, 0xc1, 0xe1, 0xdb, 0x22, 0x0e, 0x7c, 0x0c, 0xdd, 0x32, 0x75, 0xa3, 0x1d, + 0x51, 0x7b, 0x3d, 0xcd, 0x3b, 0xff, 0x28, 0x32, 0x6a, 0x5b, 0x79, 0x88, 0xf0, 0x31, 0x68, 0xf9, + 0x8a, 0xe1, 0x76, 0x4f, 0x6b, 0xe7, 0x4c, 0xf3, 0xbf, 0xc1, 0xd7, 0xfe, 0xc9, 0xb3, 0xcd, 0x96, + 0x28, 0x57, 0x5b, 0xa2, 0xec, 0xb7, 0x04, 0x7d, 0xcd, 0x08, 0xfa, 0x91, 0x11, 0x74, 0x99, 0x11, + 0xb4, 0xc9, 0x08, 0xfa, 0x9d, 0x11, 0xf4, 0x27, 0x23, 0xca, 
0x3e, 0x23, 0xe8, 0xfb, 0x8e, 0x28, + 0x9b, 0x1d, 0x51, 0xae, 0x76, 0x44, 0x79, 0xdf, 0xbc, 0x4e, 0xbf, 0x2b, 0xdf, 0xd4, 0xe3, 0xbf, + 0x01, 0x00, 0x00, 0xff, 0xff, 0x5a, 0x7a, 0xce, 0x55, 0xc0, 0x03, 0x00, 0x00, +} + +func (this *SeriesRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SeriesRequest) + if !ok { + that2, ok := that.(SeriesRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.QueryID != that1.QueryID { + return false + } + if this.FragmentID != that1.FragmentID { + return false + } + if this.Batchsize != that1.Batchsize { + return false + } + return true +} +func (this *NextRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*NextRequest) + if !ok { + that2, ok := that.(NextRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.QueryID != that1.QueryID { + return false + } + if this.FragmentID != that1.FragmentID { + return false + } + if this.Batchsize != that1.Batchsize { + return false + } + return true +} +func (this *SeriesBatch) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SeriesBatch) + if !ok { + that2, ok := that.(SeriesBatch) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.OneSeries) != len(that1.OneSeries) { + return false + } + for i := range this.OneSeries { + if !this.OneSeries[i].Equal(that1.OneSeries[i]) { + return false + } + } + return true +} +func (this *OneSeries) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*OneSeries) + if !ok { + that2, ok := that.(OneSeries) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Labels) != len(that1.Labels) { + return false + } + for i := range this.Labels { + if !this.Labels[i].Equal(that1.Labels[i]) { + return false + } + } + return true +} +func (this *StepVectorBatch) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*StepVectorBatch) + if !ok { + that2, ok := that.(StepVectorBatch) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.StepVectors) != len(that1.StepVectors) { + return false + } + for i := range this.StepVectors { + if !this.StepVectors[i].Equal(that1.StepVectors[i]) { + return false + } + } + return true +} +func (this *StepVector) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*StepVector) + if !ok { + that2, ok := that.(StepVector) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.T != that1.T { + return false + } + if len(this.Sample_IDs) != len(that1.Sample_IDs) { + return false + } + for i := range this.Sample_IDs { + if this.Sample_IDs[i] != that1.Sample_IDs[i] { + return false + } + } + if len(this.Samples) != len(that1.Samples) { + return false + } + for i := range this.Samples { + if this.Samples[i] != that1.Samples[i] { + return 
false + } + } + if len(this.Histogram_IDs) != len(that1.Histogram_IDs) { + return false + } + for i := range this.Histogram_IDs { + if this.Histogram_IDs[i] != that1.Histogram_IDs[i] { + return false + } + } + if len(this.Histograms) != len(that1.Histograms) { + return false + } + for i := range this.Histograms { + if !this.Histograms[i].Equal(&that1.Histograms[i]) { + return false + } + } + return true +} +func (this *Label) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Label) + if !ok { + that2, ok := that.(Label) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *SeriesRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&querierpb.SeriesRequest{") + s = append(s, "QueryID: "+fmt.Sprintf("%#v", this.QueryID)+",\n") + s = append(s, "FragmentID: "+fmt.Sprintf("%#v", this.FragmentID)+",\n") + s = append(s, "Batchsize: "+fmt.Sprintf("%#v", this.Batchsize)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *NextRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&querierpb.NextRequest{") + s = append(s, "QueryID: "+fmt.Sprintf("%#v", this.QueryID)+",\n") + s = append(s, "FragmentID: "+fmt.Sprintf("%#v", this.FragmentID)+",\n") + s = append(s, "Batchsize: "+fmt.Sprintf("%#v", this.Batchsize)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SeriesBatch) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&querierpb.SeriesBatch{") + if this.OneSeries != nil { + s = append(s, "OneSeries: "+fmt.Sprintf("%#v", this.OneSeries)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneSeries) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&querierpb.OneSeries{") + if this.Labels != nil { + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *StepVectorBatch) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&querierpb.StepVectorBatch{") + if this.StepVectors != nil { + s = append(s, "StepVectors: "+fmt.Sprintf("%#v", this.StepVectors)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *StepVector) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&querierpb.StepVector{") + s = append(s, "T: "+fmt.Sprintf("%#v", this.T)+",\n") + s = append(s, "Sample_IDs: "+fmt.Sprintf("%#v", this.Sample_IDs)+",\n") + s = append(s, "Samples: "+fmt.Sprintf("%#v", this.Samples)+",\n") + s = append(s, "Histogram_IDs: "+fmt.Sprintf("%#v", this.Histogram_IDs)+",\n") + if this.Histograms != nil { + vs := make([]*cortexpb.Histogram, len(this.Histograms)) + for i := range vs { + vs[i] = &this.Histograms[i] + } + s = append(s, "Histograms: "+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Label) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&querierpb.Label{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Value: 
"+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringQuerier(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QuerierClient is the client API for Querier service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QuerierClient interface { + Series(ctx context.Context, in *SeriesRequest, opts ...grpc.CallOption) (Querier_SeriesClient, error) + Next(ctx context.Context, in *NextRequest, opts ...grpc.CallOption) (Querier_NextClient, error) +} + +type querierClient struct { + cc *grpc.ClientConn +} + +func NewQuerierClient(cc *grpc.ClientConn) QuerierClient { + return &querierClient{cc} +} + +func (c *querierClient) Series(ctx context.Context, in *SeriesRequest, opts ...grpc.CallOption) (Querier_SeriesClient, error) { + stream, err := c.cc.NewStream(ctx, &_Querier_serviceDesc.Streams[0], "/querierpb.Querier/Series", opts...) + if err != nil { + return nil, err + } + x := &querierSeriesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Querier_SeriesClient interface { + Recv() (*SeriesBatch, error) + grpc.ClientStream +} + +type querierSeriesClient struct { + grpc.ClientStream +} + +func (x *querierSeriesClient) Recv() (*SeriesBatch, error) { + m := new(SeriesBatch) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *querierClient) Next(ctx context.Context, in *NextRequest, opts ...grpc.CallOption) (Querier_NextClient, error) { + stream, err := c.cc.NewStream(ctx, &_Querier_serviceDesc.Streams[1], "/querierpb.Querier/Next", opts...) + if err != nil { + return nil, err + } + x := &querierNextClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Querier_NextClient interface { + Recv() (*StepVectorBatch, error) + grpc.ClientStream +} + +type querierNextClient struct { + grpc.ClientStream +} + +func (x *querierNextClient) Recv() (*StepVectorBatch, error) { + m := new(StepVectorBatch) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// QuerierServer is the server API for Querier service. +type QuerierServer interface { + Series(*SeriesRequest, Querier_SeriesServer) error + Next(*NextRequest, Querier_NextServer) error +} + +// UnimplementedQuerierServer can be embedded to have forward compatible implementations. 
+type UnimplementedQuerierServer struct { +} + +func (*UnimplementedQuerierServer) Series(req *SeriesRequest, srv Querier_SeriesServer) error { + return status.Errorf(codes.Unimplemented, "method Series not implemented") +} +func (*UnimplementedQuerierServer) Next(req *NextRequest, srv Querier_NextServer) error { + return status.Errorf(codes.Unimplemented, "method Next not implemented") +} + +func RegisterQuerierServer(s *grpc.Server, srv QuerierServer) { + s.RegisterService(&_Querier_serviceDesc, srv) +} + +func _Querier_Series_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SeriesRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(QuerierServer).Series(m, &querierSeriesServer{stream}) +} + +type Querier_SeriesServer interface { + Send(*SeriesBatch) error + grpc.ServerStream +} + +type querierSeriesServer struct { + grpc.ServerStream +} + +func (x *querierSeriesServer) Send(m *SeriesBatch) error { + return x.ServerStream.SendMsg(m) +} + +func _Querier_Next_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(NextRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(QuerierServer).Next(m, &querierNextServer{stream}) +} + +type Querier_NextServer interface { + Send(*StepVectorBatch) error + grpc.ServerStream +} + +type querierNextServer struct { + grpc.ServerStream +} + +func (x *querierNextServer) Send(m *StepVectorBatch) error { + return x.ServerStream.SendMsg(m) +} + +var _Querier_serviceDesc = grpc.ServiceDesc{ + ServiceName: "querierpb.Querier", + HandlerType: (*QuerierServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Series", + Handler: _Querier_Series_Handler, + ServerStreams: true, + }, + { + StreamName: "Next", + Handler: _Querier_Next_Handler, + ServerStreams: true, + }, + }, + Metadata: "querier.proto", +} + +func (m *SeriesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeriesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeriesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Batchsize != 0 { + i = encodeVarintQuerier(dAtA, i, uint64(m.Batchsize)) + i-- + dAtA[i] = 0x18 + } + if m.FragmentID != 0 { + i = encodeVarintQuerier(dAtA, i, uint64(m.FragmentID)) + i-- + dAtA[i] = 0x10 + } + if m.QueryID != 0 { + i = encodeVarintQuerier(dAtA, i, uint64(m.QueryID)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *NextRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NextRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NextRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Batchsize != 0 { + i = encodeVarintQuerier(dAtA, i, uint64(m.Batchsize)) + i-- + dAtA[i] = 0x18 + } + if m.FragmentID != 0 { + i = encodeVarintQuerier(dAtA, i, uint64(m.FragmentID)) + i-- + dAtA[i] = 0x10 + } + if m.QueryID != 0 { + i = encodeVarintQuerier(dAtA, i, uint64(m.QueryID)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m 
*SeriesBatch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeriesBatch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SeriesBatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.OneSeries) > 0 { + for iNdEx := len(m.OneSeries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.OneSeries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuerier(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *OneSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OneSeries) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OneSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuerier(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *StepVectorBatch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StepVectorBatch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StepVectorBatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.StepVectors) > 0 { + for iNdEx := len(m.StepVectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.StepVectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuerier(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *StepVector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StepVector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StepVector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Histograms) > 0 { + for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuerier(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.Histogram_IDs) > 0 { + dAtA2 := make([]byte, len(m.Histogram_IDs)*10) + var j1 int + for _, num := range m.Histogram_IDs { + for num >= 1<<7 { + dAtA2[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA2[j1] = uint8(num) + j1++ + } + i -= j1 + copy(dAtA[i:], dAtA2[:j1]) + i = encodeVarintQuerier(dAtA, i, uint64(j1)) + i-- + dAtA[i] = 0x22 + } + if len(m.Samples) > 0 { + for iNdEx := 
len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + f3 := math.Float64bits(float64(m.Samples[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f3)) + } + i = encodeVarintQuerier(dAtA, i, uint64(len(m.Samples)*8)) + i-- + dAtA[i] = 0x1a + } + if len(m.Sample_IDs) > 0 { + dAtA5 := make([]byte, len(m.Sample_IDs)*10) + var j4 int + for _, num := range m.Sample_IDs { + for num >= 1<<7 { + dAtA5[j4] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j4++ + } + dAtA5[j4] = uint8(num) + j4++ + } + i -= j4 + copy(dAtA[i:], dAtA5[:j4]) + i = encodeVarintQuerier(dAtA, i, uint64(j4)) + i-- + dAtA[i] = 0x12 + } + if m.T != 0 { + i = encodeVarintQuerier(dAtA, i, uint64(m.T)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Label) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Label) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Label) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintQuerier(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintQuerier(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintQuerier(dAtA []byte, offset int, v uint64) int { + offset -= sovQuerier(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *SeriesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.QueryID != 0 { + n += 1 + sovQuerier(uint64(m.QueryID)) + } + if m.FragmentID != 0 { + n += 1 + sovQuerier(uint64(m.FragmentID)) + } + if m.Batchsize != 0 { + n += 1 + sovQuerier(uint64(m.Batchsize)) + } + return n +} + +func (m *NextRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.QueryID != 0 { + n += 1 + sovQuerier(uint64(m.QueryID)) + } + if m.FragmentID != 0 { + n += 1 + sovQuerier(uint64(m.FragmentID)) + } + if m.Batchsize != 0 { + n += 1 + sovQuerier(uint64(m.Batchsize)) + } + return n +} + +func (m *SeriesBatch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.OneSeries) > 0 { + for _, e := range m.OneSeries { + l = e.Size() + n += 1 + l + sovQuerier(uint64(l)) + } + } + return n +} + +func (m *OneSeries) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovQuerier(uint64(l)) + } + } + return n +} + +func (m *StepVectorBatch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.StepVectors) > 0 { + for _, e := range m.StepVectors { + l = e.Size() + n += 1 + l + sovQuerier(uint64(l)) + } + } + return n +} + +func (m *StepVector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.T != 0 { + n += 1 + sovQuerier(uint64(m.T)) + } + if len(m.Sample_IDs) > 0 { + l = 0 + for _, e := range m.Sample_IDs { + l += sovQuerier(uint64(e)) + } + n += 1 + sovQuerier(uint64(l)) + l + } + if len(m.Samples) > 0 { + n += 1 + sovQuerier(uint64(len(m.Samples)*8)) + len(m.Samples)*8 + } + if len(m.Histogram_IDs) > 0 { + l = 0 + for _, e := range m.Histogram_IDs 
{ + l += sovQuerier(uint64(e)) + } + n += 1 + sovQuerier(uint64(l)) + l + } + if len(m.Histograms) > 0 { + for _, e := range m.Histograms { + l = e.Size() + n += 1 + l + sovQuerier(uint64(l)) + } + } + return n +} + +func (m *Label) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovQuerier(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovQuerier(uint64(l)) + } + return n +} + +func sovQuerier(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuerier(x uint64) (n int) { + return sovQuerier(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *SeriesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SeriesRequest{`, + `QueryID:` + fmt.Sprintf("%v", this.QueryID) + `,`, + `FragmentID:` + fmt.Sprintf("%v", this.FragmentID) + `,`, + `Batchsize:` + fmt.Sprintf("%v", this.Batchsize) + `,`, + `}`, + }, "") + return s +} +func (this *NextRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NextRequest{`, + `QueryID:` + fmt.Sprintf("%v", this.QueryID) + `,`, + `FragmentID:` + fmt.Sprintf("%v", this.FragmentID) + `,`, + `Batchsize:` + fmt.Sprintf("%v", this.Batchsize) + `,`, + `}`, + }, "") + return s +} +func (this *SeriesBatch) String() string { + if this == nil { + return "nil" + } + repeatedStringForOneSeries := "[]*OneSeries{" + for _, f := range this.OneSeries { + repeatedStringForOneSeries += strings.Replace(f.String(), "OneSeries", "OneSeries", 1) + "," + } + repeatedStringForOneSeries += "}" + s := strings.Join([]string{`&SeriesBatch{`, + `OneSeries:` + repeatedStringForOneSeries + `,`, + `}`, + }, "") + return s +} +func (this *OneSeries) String() string { + if this == nil { + return "nil" + } + repeatedStringForLabels := "[]*Label{" + for _, f := range this.Labels { + repeatedStringForLabels += strings.Replace(f.String(), "Label", "Label", 1) + "," + } + repeatedStringForLabels += "}" + s := strings.Join([]string{`&OneSeries{`, + `Labels:` + repeatedStringForLabels + `,`, + `}`, + }, "") + return s +} +func (this *StepVectorBatch) String() string { + if this == nil { + return "nil" + } + repeatedStringForStepVectors := "[]*StepVector{" + for _, f := range this.StepVectors { + repeatedStringForStepVectors += strings.Replace(f.String(), "StepVector", "StepVector", 1) + "," + } + repeatedStringForStepVectors += "}" + s := strings.Join([]string{`&StepVectorBatch{`, + `StepVectors:` + repeatedStringForStepVectors + `,`, + `}`, + }, "") + return s +} +func (this *StepVector) String() string { + if this == nil { + return "nil" + } + repeatedStringForHistograms := "[]Histogram{" + for _, f := range this.Histograms { + repeatedStringForHistograms += fmt.Sprintf("%v", f) + "," + } + repeatedStringForHistograms += "}" + s := strings.Join([]string{`&StepVector{`, + `T:` + fmt.Sprintf("%v", this.T) + `,`, + `Sample_IDs:` + fmt.Sprintf("%v", this.Sample_IDs) + `,`, + `Samples:` + fmt.Sprintf("%v", this.Samples) + `,`, + `Histogram_IDs:` + fmt.Sprintf("%v", this.Histogram_IDs) + `,`, + `Histograms:` + repeatedStringForHistograms + `,`, + `}`, + }, "") + return s +} +func (this *Label) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Label{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func valueToStringQuerier(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return 
"nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *SeriesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuerier + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeriesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeriesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QueryID", wireType) + } + m.QueryID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuerier + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.QueryID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FragmentID", wireType) + } + m.FragmentID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuerier + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FragmentID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Batchsize", wireType) + } + m.Batchsize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuerier + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Batchsize |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuerier(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuerier + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuerier + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NextRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuerier + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NextRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NextRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QueryID", wireType) + } + m.QueryID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuerier + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.QueryID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FragmentID", wireType) + } + m.FragmentID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuerier 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FragmentID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Batchsize", wireType) + } + m.Batchsize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuerier + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Batchsize |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuerier(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuerier + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuerier + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeriesBatch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuerier + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeriesBatch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeriesBatch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OneSeries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuerier + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuerier + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuerier + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OneSeries = append(m.OneSeries, &OneSeries{}) + if err := m.OneSeries[len(m.OneSeries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuerier(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuerier + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuerier + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OneSeries) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuerier + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OneSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OneSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowQuerier + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuerier + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuerier + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, &Label{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuerier(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuerier + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuerier + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StepVectorBatch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuerier + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StepVectorBatch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StepVectorBatch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StepVectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuerier + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuerier + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuerier + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StepVectors = append(m.StepVectors, &StepVector{}) + if err := m.StepVectors[len(m.StepVectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuerier(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuerier + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuerier + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StepVector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuerier + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StepVector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StepVector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field T", wireType) + } + 
m.T = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuerier + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.T |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuerier + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Sample_IDs = append(m.Sample_IDs, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuerier + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthQuerier + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthQuerier + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Sample_IDs) == 0 { + m.Sample_IDs = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuerier + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Sample_IDs = append(m.Sample_IDs, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Sample_IDs", wireType) + } + case 3: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.Samples = append(m.Samples, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuerier + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthQuerier + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthQuerier + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.Samples) == 0 { + m.Samples = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.Samples = append(m.Samples, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) + } + case 4: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuerier + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Histogram_IDs = append(m.Histogram_IDs, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuerier + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthQuerier + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthQuerier + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Histogram_IDs) == 0 { + m.Histogram_IDs = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuerier + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Histogram_IDs = append(m.Histogram_IDs, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Histogram_IDs", wireType) + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuerier + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuerier + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuerier + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Histograms = append(m.Histograms, cortexpb.Histogram{}) + if err := m.Histograms[len(m.Histograms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuerier(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuerier + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuerier + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Label) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuerier + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Label: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Label: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuerier + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuerier + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuerier + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuerier + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuerier + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuerier + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuerier(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthQuerier + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthQuerier + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuerier(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuerier + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuerier + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuerier + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuerier + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthQuerier + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuerier + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipQuerier(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthQuerier + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthQuerier = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuerier = fmt.Errorf("proto: integer overflow") +) diff --git a/pkg/distributed_execution/querierpb/querier.proto b/pkg/distributed_execution/querierpb/querier.proto new file mode 100644 index 0000000000..04b2dbbc31 --- /dev/null +++ b/pkg/distributed_execution/querierpb/querier.proto @@ -0,0 +1,66 @@ +syntax = "proto3"; + +package querierpb; + +option go_package = "querierpb"; + +import "gogoproto/gogo.proto"; +import "github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +// Querier service facilitates 
inter-querier communication for distributed query execution. +// It supports two types of streaming: +// 1. Series: For discovering the shape of incoming data +// 2. Next: For retrieving the actual data +service Querier { + rpc Series(SeriesRequest) returns (stream SeriesBatch) {} + rpc Next(NextRequest) returns (stream StepVectorBatch) {} +} + +// SeriesRequest contains parameters for series streaming +message SeriesRequest { + uint64 queryID = 1; // Unique identifier for the query + uint64 fragmentID = 2; // Identifier for the specific query fragment + int64 batchsize = 3; // Number of batches to send during streaming +} + +// NextRequest contains parameters for data streaming +message NextRequest { + uint64 queryID = 1; // Unique identifier for the query + uint64 fragmentID = 2; // Identifier for the specific query fragment + int64 batchsize = 3; // Number of batches to send during streaming +} + +// SeriesBatch contains a collection of series metadata +message SeriesBatch { + repeated OneSeries OneSeries = 1; // Array of series metadata +} + +// OneSeries represents a single time series with its labels +// Used to describe the shape of incoming data +message OneSeries { + repeated Label labels = 1; // Labels defining the series +} + +// StepVectorBatch contains a collection of step vectors +message StepVectorBatch { + repeated StepVector step_vectors = 1; // Array of step vectors +} + +// StepVector represents data points at a specific timestamp +message StepVector { + int64 t = 1; // Timestamp + repeated uint64 sample_IDs = 2; // IDs for sample data + repeated double samples = 3; // Sample values + repeated uint64 histogram_IDs = 4; // IDs for histogram data + repeated cortexpb.Histogram histograms = 5 + [(gogoproto.nullable) = false]; // Histogram values +} + +// Label represents a key-value pair for series metadata +message Label { + string name = 1; // Label name + string value = 2; // Label value +} diff --git a/pkg/distributed_execution/remote_node.go b/pkg/distributed_execution/remote_node.go new file mode 100644 index 0000000000..04a146570c --- /dev/null +++ b/pkg/distributed_execution/remote_node.go @@ -0,0 +1,71 @@ +package distributed_execution + +import ( + "encoding/json" + "fmt" + + "github.com/prometheus/prometheus/promql/parser" + "github.com/thanos-io/promql-engine/logicalplan" +) + +const ( + RemoteNode = "RemoteNode" +) + +// (to verify interface implementations) +var _ logicalplan.Node = (*Remote)(nil) + +// Remote is a custom node that marks the portion of the logical plan +// that needs to be executed remotely +type Remote struct { + Expr logicalplan.Node `json:"-"` + + FragmentKey FragmentKey + FragmentAddr string +} + +func NewRemoteNode(Expr logicalplan.Node) logicalplan.Node { + return &Remote{ + // initialize the fragment key pointer first + Expr: Expr, + FragmentKey: FragmentKey{}, + } +} +func (r *Remote) Clone() logicalplan.Node { + return &Remote{Expr: r.Expr.Clone(), FragmentKey: r.FragmentKey, FragmentAddr: r.FragmentAddr} +} +func (r *Remote) Children() []*logicalplan.Node { + return []*logicalplan.Node{&r.Expr} +} +func (r *Remote) String() string { + return fmt.Sprintf("remote(%s)", r.Expr.String()) +} +func (r *Remote) ReturnType() parser.ValueType { + return r.Expr.ReturnType() +} +func (r *Remote) Type() logicalplan.NodeType { return RemoteNode } + +type remote struct { + QueryID uint64 + FragmentID uint64 + FragmentAddr string +} + +func (r *Remote) MarshalJSON() ([]byte, error) { + return json.Marshal(remote{ + QueryID: r.FragmentKey.queryID,
+ FragmentID: r.FragmentKey.fragmentID, + FragmentAddr: r.FragmentAddr, + }) +} + +func (r *Remote) UnmarshalJSON(data []byte) error { + re := remote{} + if err := json.Unmarshal(data, &re); err != nil { + return err + } + + r.FragmentKey = MakeFragmentKey(re.QueryID, re.FragmentID) + r.FragmentAddr = re.FragmentAddr + return nil +} diff --git a/pkg/distributed_execution/remote_node_test.go b/pkg/distributed_execution/remote_node_test.go new file mode 100644 index 0000000000..e94173c620 --- /dev/null +++ b/pkg/distributed_execution/remote_node_test.go @@ -0,0 +1,54 @@ +package distributed_execution + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/thanos-io/promql-engine/logicalplan" +) + +func TestRemoteNode(t *testing.T) { + t.Run("NewRemoteNode creates valid node", func(t *testing.T) { + node := &Remote{} + require.NotNil(t, node) + require.IsType(t, &Remote{}, node) + require.Equal(t, (&Remote{}).Type(), node.Type()) + }) + + t.Run("Clone creates correct copy", func(t *testing.T) { + original := &Remote{ + FragmentKey: FragmentKey{queryID: 1, fragmentID: 2}, + FragmentAddr: "[IP_ADDRESS]:9090", + Expr: &logicalplan.NumberLiteral{Val: 42}, + } + + cloned := original.Clone() + require.NotNil(t, cloned) + + remote, ok := cloned.(*Remote) + require.True(t, ok) + require.Equal(t, original.FragmentKey, remote.FragmentKey) + require.Equal(t, original.FragmentAddr, remote.FragmentAddr) + require.Equal(t, original.Expr.String(), remote.Expr.String()) + }) + + t.Run("Children returns correct nodes", func(t *testing.T) { + expr := &logicalplan.NumberLiteral{Val: 42} + node := &Remote{ + Expr: expr, + } + + children := node.Children() + require.Len(t, children, 1) + require.Equal(t, expr, *children[0]) + }) + + t.Run("ReturnType matches expression type", func(t *testing.T) { + expr := &logicalplan.NumberLiteral{Val: 42} + node := &Remote{ + Expr: expr, + } + + require.Equal(t, expr.ReturnType(), node.ReturnType()) + }) +} diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 0fc11c19d1..5b32804dac 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "net/http" + "slices" "sort" "strings" "sync" @@ -40,6 +41,7 @@ import ( "github.com/cortexproject/cortex/pkg/util/limiter" util_log "github.com/cortexproject/cortex/pkg/util/log" util_math "github.com/cortexproject/cortex/pkg/util/math" + "github.com/cortexproject/cortex/pkg/util/requestmeta" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -153,6 +155,7 @@ type Config struct { ExtendWrites bool `yaml:"extend_writes"` SignWriteRequestsEnabled bool `yaml:"sign_write_requests"` UseStreamPush bool `yaml:"use_stream_push"` + RemoteWriteV2Enabled bool `yaml:"remote_writev2_enabled"` // Distributors ring DistributorRing RingConfig `yaml:"ring"` @@ -191,8 +194,10 @@ type InstanceLimits struct { } type OTLPConfig struct { - ConvertAllAttributes bool `yaml:"convert_all_attributes"` - DisableTargetInfo bool `yaml:"disable_target_info"` + ConvertAllAttributes bool `yaml:"convert_all_attributes"` + DisableTargetInfo bool `yaml:"disable_target_info"` + AllowDeltaTemporality bool `yaml:"allow_delta_temporality"` + EnableTypeAndUnitLabels bool `yaml:"enable_type_and_unit_labels"` } // RegisterFlags adds the flags required to config this to the given FlagSet @@ -212,6 +217,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.ExtendWrites, "distributor.extend-writes", 
true, "Try writing to an additional ingester in the presence of an ingester not in the ACTIVE state. It is useful to disable this along with -ingester.unregister-on-shutdown=false in order to not spread samples to extra ingesters during rolling restarts with consistent naming.") f.BoolVar(&cfg.ZoneResultsQuorumMetadata, "distributor.zone-results-quorum-metadata", false, "Experimental, this flag may change in the future. If zone awareness and this both enabled, when querying metadata APIs (labels names and values for now), only results from quorum number of zones will be included.") f.IntVar(&cfg.NumPushWorkers, "distributor.num-push-workers", 0, "EXPERIMENTAL: Number of go routines to handle push calls from distributors to ingesters. When no workers are available, a new goroutine will be spawned automatically. If set to 0 (default), workers are disabled, and a new goroutine will be created for each push request.") + f.BoolVar(&cfg.RemoteWriteV2Enabled, "distributor.remote-writev2-enabled", false, "EXPERIMENTAL: If true, accept prometheus remote write v2 protocol push request.") f.Float64Var(&cfg.InstanceLimits.MaxIngestionRate, "distributor.instance-limits.max-ingestion-rate", 0, "Max ingestion rate (samples/sec) that this distributor will accept. This limit is per-distributor, not per-tenant. Additional push requests will be rejected. Current ingestion rate is computed as exponentially weighted moving average, updated every second. 0 = unlimited.") f.IntVar(&cfg.InstanceLimits.MaxInflightPushRequests, "distributor.instance-limits.max-inflight-push-requests", 0, "Max inflight push requests that this distributor can handle. This limit is per-distributor, not per-tenant. Additional requests will be rejected. 0 = unlimited.") @@ -219,11 +225,13 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.OTLPConfig.ConvertAllAttributes, "distributor.otlp.convert-all-attributes", false, "If true, all resource attributes are converted to labels.") f.BoolVar(&cfg.OTLPConfig.DisableTargetInfo, "distributor.otlp.disable-target-info", false, "If true, a target_info metric is not ingested. 
(refer to: https://github.com/prometheus/OpenMetrics/blob/main/specification/OpenMetrics.md#supporting-target-metadata-in-both-push-based-and-pull-based-systems)") + f.BoolVar(&cfg.OTLPConfig.AllowDeltaTemporality, "distributor.otlp.allow-delta-temporality", false, "EXPERIMENTAL: If true, allows OTLP metrics with delta temporality to be ingested.") + f.BoolVar(&cfg.OTLPConfig.EnableTypeAndUnitLabels, "distributor.otlp.enable-type-and-unit-labels", false, "EXPERIMENTAL: If true, the '__type__' and '__unit__' labels are added to OTLP metrics.") } // Validate config and returns error on failure func (cfg *Config) Validate(limits validation.Limits) error { - if !util.StringsContain(supportedShardingStrategies, cfg.ShardingStrategy) { + if !slices.Contains(supportedShardingStrategies, cfg.ShardingStrategy) { return errInvalidShardingStrategy } @@ -771,16 +779,7 @@ func (d *Distributor) Push(ctx context.Context, req *cortexpb.WriteRequest) (*co d.receivedExemplars.WithLabelValues(userID).Add(float64(validatedExemplars)) d.receivedMetadata.WithLabelValues(userID).Add(float64(len(validatedMetadata))) - if !d.nativeHistogramIngestionRateLimiter.AllowN(now, userID, validatedHistogramSamples) { - level.Warn(d.log).Log("msg", "native histogram ingestion rate limit (%v) exceeded while adding %d native histogram samples", d.nativeHistogramIngestionRateLimiter.Limit(now, userID), validatedHistogramSamples) - d.validateMetrics.DiscardedSamples.WithLabelValues(validation.NativeHistogramRateLimited, userID).Add(float64(validatedHistogramSamples)) - validatedHistogramSamples = 0 - } else { - seriesKeys = append(seriesKeys, nhSeriesKeys...) - validatedTimeseries = append(validatedTimeseries, nhValidatedTimeseries...) - } - - if len(seriesKeys) == 0 && len(metadataKeys) == 0 { + if len(seriesKeys) == 0 && len(nhSeriesKeys) == 0 && len(metadataKeys) == 0 { // Ensure the request slice is reused if there's no series or metadata passing the validation. cortexpb.ReuseSlice(req.Timeseries) @@ -805,6 +804,17 @@ func (d *Distributor) Push(ctx context.Context, req *cortexpb.WriteRequest) (*co // totalN included samples and metadata. Ingester follows this pattern when computing its ingestion rate. d.ingestionRate.Add(int64(totalN)) + var nativeHistogramErr error + + if !d.nativeHistogramIngestionRateLimiter.AllowN(now, userID, validatedHistogramSamples) { + d.validateMetrics.DiscardedSamples.WithLabelValues(validation.NativeHistogramRateLimited, userID).Add(float64(validatedHistogramSamples)) + nativeHistogramErr = httpgrpc.Errorf(http.StatusTooManyRequests, "native histogram ingestion rate limit (%v) exceeded while adding %d native histogram samples", d.nativeHistogramIngestionRateLimiter.Limit(now, userID), validatedHistogramSamples) + validatedHistogramSamples = 0 + } else { + seriesKeys = append(seriesKeys, nhSeriesKeys...) + validatedTimeseries = append(validatedTimeseries, nhValidatedTimeseries...) + } + subRing := d.ingestersRing // Obtain a subring if required. @@ -815,12 +825,30 @@ func (d *Distributor) Push(ctx context.Context, req *cortexpb.WriteRequest) (*co keys := append(seriesKeys, metadataKeys...)
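The rework above changes the failure mode of the native histogram limiter: instead of silently dropping over-limit samples and reporting success, the distributor now records the discards and defers an HTTP 429 to the caller, while still ingesting whatever else the batch carried. The limiter is a per-tenant token bucket (a rate plus a burst). Below is a minimal sketch of that burst-then-throttle behavior, using golang.org/x/time/rate as a stand-in for Cortex's per-tenant limiter (an assumption for illustration, not the type used here); with rate 10 and burst 10 it reproduces the accept/reject pattern of the updated local-strategy test case later in this patch.

package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Rate 10 samples/sec with burst 10, mirroring the local-strategy test case.
	lim := rate.NewLimiter(rate.Limit(10), 10)
	now := time.Now()
	for _, n := range []int{4, 7, 4, 3} {
		if lim.AllowN(now, n) {
			// Tokens available: the native histogram samples join the batch.
			fmt.Printf("accepted %d native histogram samples\n", n)
		} else {
			// Over limit: the distributor counts these as NativeHistogramRateLimited
			// discards and returns an HTTP 429 to the client.
			fmt.Printf("rejected %d native histogram samples\n", n)
		}
	}
}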
initialMetadataIndex := len(seriesKeys) + if len(keys) == 0 && nativeHistogramErr != nil { + return nil, nativeHistogramErr + } + err = d.doBatch(ctx, req, subRing, keys, initialMetadataIndex, validatedMetadata, validatedTimeseries, userID) if err != nil { return nil, err } - return &cortexpb.WriteResponse{}, firstPartialErr + resp := &cortexpb.WriteResponse{} + if d.cfg.RemoteWriteV2Enabled { + // We simply expose the validated samples, histograms, and exemplars + // in the response headers. We should improve this to expose the actual + // values written by the ingesters. + resp.Samples = int64(validatedFloatSamples) + resp.Histograms = int64(validatedHistogramSamples) + resp.Exemplars = int64(validatedExemplars) + } + + if nativeHistogramErr != nil { + return resp, nativeHistogramErr + } + + return resp, firstPartialErr } func (d *Distributor) updateLabelSetMetrics() { @@ -892,9 +920,9 @@ func (d *Distributor) doBatch(ctx context.Context, req *cortexpb.WriteRequest, s if sp := opentracing.SpanFromContext(ctx); sp != nil { localCtx = opentracing.ContextWithSpan(localCtx, sp) } - // Get any HTTP headers that are supposed to be added to logs and add to localCtx for later use - if headerMap := util_log.HeaderMapFromContext(ctx); headerMap != nil { - localCtx = util_log.ContextWithHeaderMap(localCtx, headerMap) + // Get any HTTP request metadata that is supposed to be added to logs and add to localCtx for later use + if requestContextMap := requestmeta.MapFromContext(ctx); requestContextMap != nil { + localCtx = requestmeta.ContextWithRequestMetadataMap(localCtx, requestContextMap) } // Get clientIP(s) from Context and add it to localCtx source := util.GetSourceIPsFromOutgoingCtx(ctx) @@ -1017,7 +1045,7 @@ func (d *Distributor) prepareSeriesKeys(ctx context.Context, req *cortexpb.Write if mrc := limits.MetricRelabelConfigs; len(mrc) > 0 { l, _ := relabel.Process(cortexpb.FromLabelAdaptersToLabels(ts.Labels), mrc...) - if len(l) == 0 { + if l.Len() == 0 { // all labels are gone, samples will be discarded d.validateMetrics.DiscardedSamples.WithLabelValues( validation.DroppedByRelabelConfiguration, @@ -1216,8 +1244,8 @@ func getErrorStatus(err error) string { } // ForReplicationSet runs f, in parallel, for all ingesters in the input replication set.
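When remote_writev2_enabled is on, the WriteResponse counts set above are what ultimately back the written-counts statistics a remote write 2.0 sender sees. A hedged sketch of the client side, assuming the X-Prometheus-Remote-Write-*-Written response headers defined by the Remote Write 2.0 specification (the header plumbing itself is outside this hunk):

import (
	"net/http"
	"strconv"
)

// writtenCounts reads the written-counts headers from a remote write 2.0
// response; missing or malformed headers count as zero writes.
func writtenCounts(resp *http.Response) (samples, histograms, exemplars int64) {
	get := func(name string) int64 {
		v, _ := strconv.ParseInt(resp.Header.Get(name), 10, 64)
		return v
	}
	return get("X-Prometheus-Remote-Write-Samples-Written"),
		get("X-Prometheus-Remote-Write-Histograms-Written"),
		get("X-Prometheus-Remote-Write-Exemplars-Written")
}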
-func (d *Distributor) ForReplicationSet(ctx context.Context, replicationSet ring.ReplicationSet, zoneResultsQuorum bool, partialDataEnabled bool, f func(context.Context, ingester_client.IngesterClient) (interface{}, error)) ([]interface{}, error) { - return replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, zoneResultsQuorum, partialDataEnabled, func(ctx context.Context, ing *ring.InstanceDesc) (interface{}, error) { +func (d *Distributor) ForReplicationSet(ctx context.Context, replicationSet ring.ReplicationSet, zoneResultsQuorum bool, partialDataEnabled bool, f func(context.Context, ingester_client.IngesterClient) (any, error)) ([]any, error) { + return replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, zoneResultsQuorum, partialDataEnabled, func(ctx context.Context, ing *ring.InstanceDesc) (any, error) { client, err := d.ingesterPool.GetClientFor(ing.Addr) if err != nil { return nil, err @@ -1227,7 +1255,7 @@ func (d *Distributor) ForReplicationSet(ctx context.Context, replicationSet ring }) } -func (d *Distributor) LabelValuesForLabelNameCommon(ctx context.Context, from, to model.Time, labelName model.LabelName, hints *storage.LabelHints, f func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelValuesRequest, limiter *limiter.QueryLimiter) ([]interface{}, error), matchers ...*labels.Matcher) ([]string, error) { +func (d *Distributor) LabelValuesForLabelNameCommon(ctx context.Context, from, to model.Time, labelName model.LabelName, hints *storage.LabelHints, f func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelValuesRequest, limiter *limiter.QueryLimiter) ([]any, error), matchers ...*labels.Matcher) ([]string, error) { span, ctx := opentracing.StartSpanFromContext(ctx, "Distributor.LabelValues", opentracing.Tags{ "name": labelName, "start": from.Unix(), @@ -1269,8 +1297,8 @@ func (d *Distributor) LabelValuesForLabelNameCommon(ctx context.Context, from, t // LabelValuesForLabelName returns all the label values that are associated with a given label name. func (d *Distributor) LabelValuesForLabelName(ctx context.Context, from, to model.Time, labelName model.LabelName, hint *storage.LabelHints, partialDataEnabled bool, matchers ...*labels.Matcher) ([]string, error) { - return d.LabelValuesForLabelNameCommon(ctx, from, to, labelName, hint, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelValuesRequest, queryLimiter *limiter.QueryLimiter) ([]interface{}, error) { - return d.ForReplicationSet(ctx, rs, d.cfg.ZoneResultsQuorumMetadata, partialDataEnabled, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { + return d.LabelValuesForLabelNameCommon(ctx, from, to, labelName, hint, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelValuesRequest, queryLimiter *limiter.QueryLimiter) ([]any, error) { + return d.ForReplicationSet(ctx, rs, d.cfg.ZoneResultsQuorumMetadata, partialDataEnabled, func(ctx context.Context, client ingester_client.IngesterClient) (any, error) { resp, err := client.LabelValues(ctx, req) if err != nil { return nil, err @@ -1285,8 +1313,8 @@ func (d *Distributor) LabelValuesForLabelName(ctx context.Context, from, to mode // LabelValuesForLabelNameStream returns all the label values that are associated with a given label name. 
func (d *Distributor) LabelValuesForLabelNameStream(ctx context.Context, from, to model.Time, labelName model.LabelName, hint *storage.LabelHints, partialDataEnabled bool, matchers ...*labels.Matcher) ([]string, error) { - return d.LabelValuesForLabelNameCommon(ctx, from, to, labelName, hint, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelValuesRequest, queryLimiter *limiter.QueryLimiter) ([]interface{}, error) { - return d.ForReplicationSet(ctx, rs, d.cfg.ZoneResultsQuorumMetadata, partialDataEnabled, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { + return d.LabelValuesForLabelNameCommon(ctx, from, to, labelName, hint, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelValuesRequest, queryLimiter *limiter.QueryLimiter) ([]any, error) { + return d.ForReplicationSet(ctx, rs, d.cfg.ZoneResultsQuorumMetadata, partialDataEnabled, func(ctx context.Context, client ingester_client.IngesterClient) (any, error) { stream, err := client.LabelValuesStream(ctx, req) if err != nil { return nil, err @@ -1312,7 +1340,7 @@ func (d *Distributor) LabelValuesForLabelNameStream(ctx context.Context, from, t }, matchers...) } -func (d *Distributor) LabelNamesCommon(ctx context.Context, from, to model.Time, hints *storage.LabelHints, f func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelNamesRequest, limiter *limiter.QueryLimiter) ([]interface{}, error), matchers ...*labels.Matcher) ([]string, error) { +func (d *Distributor) LabelNamesCommon(ctx context.Context, from, to model.Time, hints *storage.LabelHints, f func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelNamesRequest, limiter *limiter.QueryLimiter) ([]any, error), matchers ...*labels.Matcher) ([]string, error) { span, ctx := opentracing.StartSpanFromContext(ctx, "Distributor.LabelNames", opentracing.Tags{ "start": from.Unix(), "end": to.Unix(), @@ -1355,8 +1383,8 @@ func (d *Distributor) LabelNamesCommon(ctx context.Context, from, to model.Time, } func (d *Distributor) LabelNamesStream(ctx context.Context, from, to model.Time, hints *storage.LabelHints, partialDataEnabled bool, matchers ...*labels.Matcher) ([]string, error) { - return d.LabelNamesCommon(ctx, from, to, hints, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelNamesRequest, queryLimiter *limiter.QueryLimiter) ([]interface{}, error) { - return d.ForReplicationSet(ctx, rs, d.cfg.ZoneResultsQuorumMetadata, partialDataEnabled, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { + return d.LabelNamesCommon(ctx, from, to, hints, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelNamesRequest, queryLimiter *limiter.QueryLimiter) ([]any, error) { + return d.ForReplicationSet(ctx, rs, d.cfg.ZoneResultsQuorumMetadata, partialDataEnabled, func(ctx context.Context, client ingester_client.IngesterClient) (any, error) { stream, err := client.LabelNamesStream(ctx, req) if err != nil { return nil, err @@ -1384,8 +1412,8 @@ func (d *Distributor) LabelNamesStream(ctx context.Context, from, to model.Time, // LabelNames returns all the label names. 
func (d *Distributor) LabelNames(ctx context.Context, from, to model.Time, hint *storage.LabelHints, partialDataEnabled bool, matchers ...*labels.Matcher) ([]string, error) { - return d.LabelNamesCommon(ctx, from, to, hint, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelNamesRequest, queryLimiter *limiter.QueryLimiter) ([]interface{}, error) { - return d.ForReplicationSet(ctx, rs, d.cfg.ZoneResultsQuorumMetadata, partialDataEnabled, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { + return d.LabelNamesCommon(ctx, from, to, hint, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.LabelNamesRequest, queryLimiter *limiter.QueryLimiter) ([]any, error) { + return d.ForReplicationSet(ctx, rs, d.cfg.ZoneResultsQuorumMetadata, partialDataEnabled, func(ctx context.Context, client ingester_client.IngesterClient) (any, error) { resp, err := client.LabelNames(ctx, req) if err != nil { return nil, err @@ -1402,7 +1430,7 @@ func (d *Distributor) LabelNames(ctx context.Context, from, to model.Time, hint // MetricsForLabelMatchers gets the metrics that match said matchers func (d *Distributor) MetricsForLabelMatchers(ctx context.Context, from, through model.Time, hint *storage.SelectHints, partialDataEnabled bool, matchers ...*labels.Matcher) ([]labels.Labels, error) { return d.metricsForLabelMatchersCommon(ctx, from, through, hint, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.MetricsForLabelMatchersRequest, metrics *map[model.Fingerprint]labels.Labels, mutex *sync.Mutex, queryLimiter *limiter.QueryLimiter) error { - _, err := d.ForReplicationSet(ctx, rs, false, partialDataEnabled, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { + _, err := d.ForReplicationSet(ctx, rs, false, partialDataEnabled, func(ctx context.Context, client ingester_client.IngesterClient) (any, error) { resp, err := client.MetricsForLabelMatchers(ctx, req) if err != nil { return nil, err @@ -1431,7 +1459,7 @@ func (d *Distributor) MetricsForLabelMatchers(ctx context.Context, from, through func (d *Distributor) MetricsForLabelMatchersStream(ctx context.Context, from, through model.Time, hint *storage.SelectHints, partialDataEnabled bool, matchers ...*labels.Matcher) ([]labels.Labels, error) { return d.metricsForLabelMatchersCommon(ctx, from, through, hint, func(ctx context.Context, rs ring.ReplicationSet, req *ingester_client.MetricsForLabelMatchersRequest, metrics *map[model.Fingerprint]labels.Labels, mutex *sync.Mutex, queryLimiter *limiter.QueryLimiter) error { - _, err := d.ForReplicationSet(ctx, rs, false, partialDataEnabled, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { + _, err := d.ForReplicationSet(ctx, rs, false, partialDataEnabled, func(ctx context.Context, client ingester_client.IngesterClient) (any, error) { stream, err := client.MetricsForLabelMatchersStream(ctx, req) if err != nil { return nil, err @@ -1506,7 +1534,7 @@ func (d *Distributor) MetricsMetadata(ctx context.Context, req *ingester_client. } // TODO(gotjosh): We only need to look in all the ingesters if shardByAllLabels is enabled. 
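The any rewrites running through these label and metadata fan-out paths are mechanical: since Go 1.18, any is a predeclared alias for interface{}, so the callback signatures are identical types and nothing changes at runtime. A two-line illustration of the equivalence (illustrative only, not from this patch):

var f func() (interface{}, error)
var g func() (any, error) = f // compiles: any is an alias for interface{}, so the types are identical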
- resps, err := d.ForReplicationSet(ctx, replicationSet, d.cfg.ZoneResultsQuorumMetadata, false, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { + resps, err := d.ForReplicationSet(ctx, replicationSet, d.cfg.ZoneResultsQuorumMetadata, false, func(ctx context.Context, client ingester_client.IngesterClient) (any, error) { return client.MetricsMetadata(ctx, req) }) if err != nil { @@ -1548,7 +1576,7 @@ func (d *Distributor) UserStats(ctx context.Context) (*ingester.UserStats, error replicationSet.MaxErrors = 0 req := &ingester_client.UserStatsRequest{} - resps, err := d.ForReplicationSet(ctx, replicationSet, false, false, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { + resps, err := d.ForReplicationSet(ctx, replicationSet, false, false, func(ctx context.Context, client ingester_client.IngesterClient) (any, error) { return client.UserStats(ctx, req) }) if err != nil { @@ -1575,26 +1603,31 @@ func (d *Distributor) UserStats(ctx context.Context) (*ingester.UserStats, error // AllUserStats returns statistics about all users. // Note it does not divide by the ReplicationFactor like UserStats() -func (d *Distributor) AllUserStats(ctx context.Context) ([]ingester.UserIDStats, error) { +func (d *Distributor) AllUserStats(ctx context.Context) ([]ingester.UserIDStats, int, error) { // Add up by user, across all responses from ingesters perUserTotals := make(map[string]ingester.UserStats) + queriedIngesterNum := 0 req := &ingester_client.UserStatsRequest{} ctx = user.InjectOrgID(ctx, "1") // fake: ingester insists on having an org ID - // Not using d.ForReplicationSet(), so we can fail after first error. replicationSet, err := d.ingestersRing.GetAllHealthy(ring.Read) if err != nil { - return nil, err + return nil, 0, err } for _, ingester := range replicationSet.Instances { client, err := d.ingesterPool.GetClientFor(ingester.Addr) if err != nil { - return nil, err + return nil, 0, err } resp, err := client.(ingester_client.IngesterClient).AllUserStats(ctx, req) if err != nil { - return nil, err + // During an ingester rolling update, an ingester might be temporarily + // in stopping or starting state. Therefore, returning an error would + // cause the API to fail during the update. This is an expected error in + // that scenario, so we skip the ingester and continue the loop to keep the API working. + continue } + queriedIngesterNum++ for _, u := range resp.Stats { s := perUserTotals[u.UserId] s.IngestionRate += u.Data.IngestionRate @@ -1603,6 +1636,7 @@ func (d *Distributor) AllUserStats(ctx context.Context) ([]ingester.UserIDStats, s.NumSeries += u.Data.NumSeries s.ActiveSeries += u.Data.ActiveSeries s.LoadedBlocks += u.Data.LoadedBlocks + s.QueriedIngesters += 1 perUserTotals[u.UserId] = s } } @@ -1619,22 +1653,23 @@ func (d *Distributor) AllUserStats(ctx context.Context) ([]ingester.UserIDStats, NumSeries: stats.NumSeries, ActiveSeries: stats.ActiveSeries, LoadedBlocks: stats.LoadedBlocks, + QueriedIngesters: stats.QueriedIngesters, }, }) } - return response, nil + return response, queriedIngesterNum, nil } // AllUserStatsHandler shows stats for all users.
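AllUserStats now degrades gracefully: unreachable ingesters are skipped rather than failing the whole API call, and the new queried-ingester count lets callers tell complete totals from partial ones. A hypothetical helper (not part of this patch) showing how that count could be interpreted against the ring:

import "github.com/cortexproject/cortex/pkg/ring"

// statsComplete reports whether every ingester currently registered in the
// ring contributed to the totals; if not, NumSeries and the other per-user
// numbers may under-count during a rolling update.
func statsComplete(queriedIngesters int, r *ring.Ring) bool {
	return queriedIngesters >= r.InstancesCount()
}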
func (d *Distributor) AllUserStatsHandler(w http.ResponseWriter, r *http.Request) { - stats, err := d.AllUserStats(r.Context()) + stats, queriedIngesterNum, err := d.AllUserStats(r.Context()) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } - ingester.AllUserStatsRender(w, r, stats, d.ingestersRing.ReplicationFactor()) + ingester.AllUserStatsRender(w, r, stats, d.ingestersRing.ReplicationFactor(), queriedIngesterNum) } func (d *Distributor) ServeHTTP(w http.ResponseWriter, req *http.Request) { diff --git a/pkg/distributor/distributor_ring.go b/pkg/distributor/distributor_ring.go index f1b0fa2fb3..5a49fa7a71 100644 --- a/pkg/distributor/distributor_ring.go +++ b/pkg/distributor/distributor_ring.go @@ -18,9 +18,10 @@ import ( // is used to strip down the config to the minimum, and avoid confusion // to the user. type RingConfig struct { - KVStore kv.Config `yaml:"kvstore"` - HeartbeatPeriod time.Duration `yaml:"heartbeat_period"` - HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"` + KVStore kv.Config `yaml:"kvstore"` + HeartbeatPeriod time.Duration `yaml:"heartbeat_period"` + HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"` + DetailedMetricsEnabled bool `yaml:"detailed_metrics_enabled"` // Instance details InstanceID string `yaml:"instance_id" doc:"hidden"` @@ -44,6 +45,7 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { cfg.KVStore.RegisterFlagsWithPrefix("distributor.ring.", "collectors/", f) f.DurationVar(&cfg.HeartbeatPeriod, "distributor.ring.heartbeat-period", 5*time.Second, "Period at which to heartbeat to the ring. 0 = disabled.") f.DurationVar(&cfg.HeartbeatTimeout, "distributor.ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which distributors are considered unhealthy within the ring. 0 = never (timeout disabled).") + f.BoolVar(&cfg.DetailedMetricsEnabled, "distributor.ring.detailed-metrics-enabled", true, "Set to true to enable ring detailed metrics. These metrics provide detailed information, such as token count and ownership per tenant. Disabling them can significantly decrease the number of metrics emitted.") // Instance flags cfg.InstanceInterfaceNames = []string{"eth0", "en0"} @@ -94,6 +96,7 @@ func (cfg *RingConfig) ToRingConfig() ring.Config { rc.KVStore = cfg.KVStore rc.HeartbeatTimeout = cfg.HeartbeatTimeout rc.ReplicationFactor = 1 + rc.DetailedMetricsEnabled = cfg.DetailedMetricsEnabled return rc } diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index 5ad019c4bf..fd50aef9d1 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "maps" "math" "math/rand" "net/http" @@ -116,7 +117,7 @@ func TestConfig_Validate(t *testing.T) { } for testName, testData := range tests { - testData := testData // Needed for t.Parallel to work correctly + // Needed for t.Parallel to work correctly t.Run(testName, func(t *testing.T) { t.Parallel() cfg := Config{} @@ -390,7 +391,7 @@ func TestDistributor_Push(t *testing.T) { // yet. To avoid flaky test we retry metrics assertion until we hit the desired state (no error) // within a reasonable timeout. if tc.expectedMetrics != "" { - test.Poll(t, time.Second, nil, func() interface{} { + test.Poll(t, time.Second, nil, func() any { return testutil.GatherAndCompare(regs[0], strings.NewReader(tc.expectedMetrics), tc.metricNames...) 
}) } @@ -527,13 +528,13 @@ func TestDistributor_MetricsCleanup(t *testing.T) { d.cleanupInactiveUser("userA") - err := r.KVClient.CAS(context.Background(), ingester.RingKey, func(in interface{}) (interface{}, bool, error) { + err := r.KVClient.CAS(context.Background(), ingester.RingKey, func(in any) (any, bool, error) { r := in.(*ring.Desc) delete(r.Ingesters, "ingester-0") return in, true, nil }) - test.Poll(t, time.Second, true, func() interface{} { + test.Poll(t, time.Second, true, func() any { ings, _, _ := r.GetAllInstanceDescs(ring.Write) return len(ings) == 1 }) @@ -637,10 +638,8 @@ func TestDistributor_PushIngestionRateLimiter(t *testing.T) { } for testName, testData := range tests { - testData := testData for _, enableHistogram := range []bool{false, true} { - enableHistogram := enableHistogram t.Run(fmt.Sprintf("%s, histogram=%s", testName, strconv.FormatBool(enableHistogram)), func(t *testing.T) { t.Parallel() limits := &validation.Limits{} @@ -689,6 +688,7 @@ func TestDistributor_PushIngestionRateLimiter_Histograms(t *testing.T) { metadata int expectedError error expectedNHDiscardedSampleMetricValue int + isPartialDrop bool } ctx := user.InjectOrgID(context.Background(), "user") @@ -705,32 +705,32 @@ func TestDistributor_PushIngestionRateLimiter_Histograms(t *testing.T) { "local strategy: native histograms limit should be set to each distributor": { distributors: 2, ingestionRateStrategy: validation.LocalIngestionRateStrategy, - ingestionRate: 20, - ingestionBurstSize: 20, + ingestionRate: 30, + ingestionBurstSize: 30, nativeHistogramIngestionRate: 10, nativeHistogramIngestionBurstSize: 10, pushes: []testPush{ {nhSamples: 4, expectedError: nil}, - {nhSamples: 6, expectedError: nil}, - {nhSamples: 6, expectedError: nil, expectedNHDiscardedSampleMetricValue: 6}, - {nhSamples: 4, metadata: 1, expectedError: nil, expectedNHDiscardedSampleMetricValue: 10}, - {nhSamples: 1, expectedError: nil, expectedNHDiscardedSampleMetricValue: 11}, - {nhSamples: 1, expectedError: nil, expectedNHDiscardedSampleMetricValue: 12}, + {metadata: 1, expectedError: nil}, + {nhSamples: 7, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "native histogram ingestion rate limit (10) exceeded while adding 7 native histogram samples"), expectedNHDiscardedSampleMetricValue: 7}, + {nhSamples: 4, metadata: 1, expectedError: nil, expectedNHDiscardedSampleMetricValue: 7}, + {nhSamples: 3, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "native histogram ingestion rate limit (10) exceeded while adding 3 native histogram samples"), expectedNHDiscardedSampleMetricValue: 10}, + {metadata: 1, expectedError: nil, expectedNHDiscardedSampleMetricValue: 10}, }, }, "global strategy: native histograms limit should be evenly shared across distributors": { distributors: 2, ingestionRateStrategy: validation.GlobalIngestionRateStrategy, - ingestionRate: 20, - ingestionBurstSize: 10, + ingestionRate: 40, + ingestionBurstSize: 20, nativeHistogramIngestionRate: 10, nativeHistogramIngestionBurstSize: 5, pushes: []testPush{ {nhSamples: 2, expectedError: nil}, {nhSamples: 1, expectedError: nil}, - {nhSamples: 3, metadata: 1, expectedError: nil, expectedNHDiscardedSampleMetricValue: 3}, - {nhSamples: 2, expectedError: nil, expectedNHDiscardedSampleMetricValue: 3}, - {nhSamples: 1, expectedError: nil, expectedNHDiscardedSampleMetricValue: 4}, + {nhSamples: 3, metadata: 1, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "native histogram ingestion rate limit (5) exceeded while adding 3 native histogram 
samples"), expectedNHDiscardedSampleMetricValue: 3, isPartialDrop: true}, + {nhSamples: 1, expectedError: nil, expectedNHDiscardedSampleMetricValue: 3}, + {nhSamples: 2, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "native histogram ingestion rate limit (5) exceeded while adding 2 native histogram samples"), expectedNHDiscardedSampleMetricValue: 5}, {nhSamples: 1, expectedError: nil, expectedNHDiscardedSampleMetricValue: 5}, }, }, @@ -744,44 +744,66 @@ func TestDistributor_PushIngestionRateLimiter_Histograms(t *testing.T) { pushes: []testPush{ {nhSamples: 10, expectedError: nil}, {nhSamples: 5, expectedError: nil}, - {nhSamples: 6, metadata: 1, expectedError: nil, expectedNHDiscardedSampleMetricValue: 6}, + {nhSamples: 6, metadata: 1, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "native histogram ingestion rate limit (5) exceeded while adding 6 native histogram samples"), expectedNHDiscardedSampleMetricValue: 6, isPartialDrop: true}, {nhSamples: 5, expectedError: nil, expectedNHDiscardedSampleMetricValue: 6}, - {nhSamples: 1, expectedError: nil, expectedNHDiscardedSampleMetricValue: 7}, - {nhSamples: 1, expectedError: nil, expectedNHDiscardedSampleMetricValue: 8}, + {nhSamples: 1, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "native histogram ingestion rate limit (5) exceeded while adding 1 native histogram samples"), expectedNHDiscardedSampleMetricValue: 7}, }, }, - "local strategy: If NH samples hit NH rate limit, other samples should succeed when under rate limit": { + "global strategy: Batch contains only NH samples and NH rate limit is hit": { distributors: 2, - ingestionRateStrategy: validation.LocalIngestionRateStrategy, + ingestionRateStrategy: validation.GlobalIngestionRateStrategy, ingestionRate: 20, ingestionBurstSize: 20, - nativeHistogramIngestionRate: 5, - nativeHistogramIngestionBurstSize: 5, + nativeHistogramIngestionRate: 10, + nativeHistogramIngestionBurstSize: 10, pushes: []testPush{ - {samples: 5, nhSamples: 4, expectedError: nil}, - {samples: 6, nhSamples: 2, expectedError: nil, expectedNHDiscardedSampleMetricValue: 2}, - {samples: 4, metadata: 1, nhSamples: 1, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (20) exceeded while adding 5 samples and 1 metadata"), expectedNHDiscardedSampleMetricValue: 2}, - {metadata: 1, expectedError: nil, expectedNHDiscardedSampleMetricValue: 2}, + {nhSamples: 2, expectedError: nil}, + {nhSamples: 3, expectedError: nil}, + {nhSamples: 6, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "native histogram ingestion rate limit (5) exceeded while adding 6 native histogram samples"), expectedNHDiscardedSampleMetricValue: 6}, }, }, - "global strategy: If NH samples hit NH rate limit, other samples should succeed when under rate limit": { + "global strategy: Batch contains only NH samples and metadata and NH rate limit is hit": { distributors: 2, ingestionRateStrategy: validation.GlobalIngestionRateStrategy, ingestionRate: 20, - ingestionBurstSize: 10, + ingestionBurstSize: 20, nativeHistogramIngestionRate: 10, - nativeHistogramIngestionBurstSize: 5, + nativeHistogramIngestionBurstSize: 10, + pushes: []testPush{ + {nhSamples: 2, expectedError: nil}, + {nhSamples: 3, metadata: 2, expectedError: nil}, + {nhSamples: 6, metadata: 1, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "native histogram ingestion rate limit (5) exceeded while adding 6 native histogram samples"), expectedNHDiscardedSampleMetricValue: 6, isPartialDrop: true}}, + }, + "global 
strategy: Batch contains regular and NH samples and NH rate limit is hit": { + distributors: 2, + ingestionRateStrategy: validation.GlobalIngestionRateStrategy, + ingestionRate: 30, + ingestionBurstSize: 30, + nativeHistogramIngestionRate: 10, + nativeHistogramIngestionBurstSize: 10, + pushes: []testPush{ + {samples: 3, nhSamples: 2, metadata: 1, expectedError: nil}, + {samples: 1, nhSamples: 9, metadata: 1, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "native histogram ingestion rate limit (5) exceeded while adding 9 native histogram samples"), expectedNHDiscardedSampleMetricValue: 9, isPartialDrop: true}, + {nhSamples: 9, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "native histogram ingestion rate limit (5) exceeded while adding 9 native histogram samples"), expectedNHDiscardedSampleMetricValue: 18}, + {samples: 3, metadata: 1, expectedError: nil, expectedNHDiscardedSampleMetricValue: 18}, + }, + }, + "global strategy: Batch contains regular and NH samples and normal ingestion rate limit is hit": { + distributors: 2, + ingestionRateStrategy: validation.GlobalIngestionRateStrategy, + ingestionRate: 20, + ingestionBurstSize: 20, + nativeHistogramIngestionRate: 10, + nativeHistogramIngestionBurstSize: 10, pushes: []testPush{ - {samples: 3, nhSamples: 2, expectedError: nil}, - {samples: 3, nhSamples: 4, expectedError: nil, expectedNHDiscardedSampleMetricValue: 4}, - {samples: 1, metadata: 1, nhSamples: 1, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (10) exceeded while adding 2 samples and 1 metadata"), expectedNHDiscardedSampleMetricValue: 4}, - {metadata: 1, expectedError: nil, expectedNHDiscardedSampleMetricValue: 4}, + {samples: 4, nhSamples: 4, metadata: 4, expectedError: nil}, + {samples: 4, nhSamples: 4, metadata: 4, expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (10) exceeded while adding 8 samples and 4 metadata")}, + {samples: 3, nhSamples: 3, metadata: 2, expectedError: nil}, }, }, } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() @@ -812,8 +834,13 @@ func TestDistributor_PushIngestionRateLimiter_Histograms(t *testing.T) { assert.Equal(t, emptyResponse, response) assert.Nil(t, err) } else { - assert.Nil(t, response) assert.Equal(t, push.expectedError, err) + // Check if an empty response is expected + if push.isPartialDrop { + assert.Equal(t, emptyResponse, response) + } else { + assert.Nil(t, response) + } } assert.Equal(t, float64(push.expectedNHDiscardedSampleMetricValue), testutil.ToFloat64(distributors[0].validateMetrics.DiscardedSamples.WithLabelValues(validation.NativeHistogramRateLimited, "user"))) } @@ -853,7 +880,7 @@ func TestPush_QuorumError(t *testing.T) { ingesters[1].failResp.Store(httpgrpc.Errorf(500, "InternalServerError")) ingesters[2].failResp.Store(httpgrpc.Errorf(429, "Throttling")) - for i := 0; i < numberOfWrites; i++ { + for range numberOfWrites { request := makeWriteRequest(0, 30, 20, 10) _, err := d.Push(ctx, request) status, ok := status.FromError(err) @@ -866,7 +893,7 @@ func TestPush_QuorumError(t *testing.T) { ingesters[1].failResp.Store(httpgrpc.Errorf(429, "Throttling")) ingesters[2].failResp.Store(httpgrpc.Errorf(500, "InternalServerError")) - for i := 0; i < numberOfWrites; i++ { + for range numberOfWrites { request := makeWriteRequest(0, 300, 200, 10) _, err := d.Push(ctx, request) status, ok := status.FromError(err) @@ -879,7 +906,7 @@ func TestPush_QuorumError(t *testing.T) { 
ingesters[1].failResp.Store(httpgrpc.Errorf(429, "Throttling")) ingesters[2].happy.Store(true) - for i := 0; i < numberOfWrites; i++ { + for range numberOfWrites { request := makeWriteRequest(0, 30, 20, 10) _, err := d.Push(ctx, request) status, ok := status.FromError(err) @@ -892,7 +919,7 @@ func TestPush_QuorumError(t *testing.T) { ingesters[1].happy.Store(true) ingesters[2].happy.Store(true) - for i := 0; i < 1; i++ { + for range 1 { request := makeWriteRequest(0, 30, 20, 10) _, err := d.Push(ctx, request) require.NoError(t, err) @@ -903,7 +930,7 @@ func TestPush_QuorumError(t *testing.T) { ingesters[1].happy.Store(true) ingesters[2].happy.Store(true) - err := r.KVClient.CAS(context.Background(), ingester.RingKey, func(in interface{}) (interface{}, bool, error) { + err := r.KVClient.CAS(context.Background(), ingester.RingKey, func(in any) (any, bool, error) { r := in.(*ring.Desc) ingester2 := r.Ingesters["ingester-2"] ingester2.State = ring.LEFT @@ -915,12 +942,12 @@ func TestPush_QuorumError(t *testing.T) { require.NoError(t, err) // Give time to the ring get updated with the KV value - test.Poll(t, 15*time.Second, true, func() interface{} { + test.Poll(t, 15*time.Second, true, func() any { replicationSet, _ := r.GetAllHealthy(ring.Read) return len(replicationSet.Instances) == 2 }) - for i := 0; i < numberOfWrites; i++ { + for range numberOfWrites { request := makeWriteRequest(0, 30, 20, 10) _, err := d.Push(ctx, request) require.Error(t, err) @@ -1067,10 +1094,8 @@ func TestDistributor_PushInstanceLimits(t *testing.T) { } for testName, testData := range tests { - testData := testData for _, enableHistogram := range []bool{true, false} { - enableHistogram := enableHistogram t.Run(fmt.Sprintf("%s, histogram=%s", testName, strconv.FormatBool(enableHistogram)), func(t *testing.T) { t.Parallel() limits := &validation.Limits{} @@ -1175,7 +1200,6 @@ func TestDistributor_PushHAInstances(t *testing.T) { tc := tc shardByAllLabels := shardByAllLabels for _, enableHistogram := range []bool{true, false} { - enableHistogram := enableHistogram t.Run(fmt.Sprintf("[%d](shardByAllLabels=%v, histogram=%v)", i, shardByAllLabels, enableHistogram), func(t *testing.T) { t.Parallel() var limits validation.Limits @@ -1238,7 +1262,6 @@ func TestDistributor_PushMixedHAInstances(t *testing.T) { tc := tc shardByAllLabels := shardByAllLabels for _, enableHistogram := range []bool{false} { - enableHistogram := enableHistogram t.Run(fmt.Sprintf("[%d](shardByAllLabels=%v, histogram=%v)", i, shardByAllLabels, enableHistogram), func(t *testing.T) { t.Parallel() var limits validation.Limits @@ -1425,7 +1448,7 @@ func TestDistributor_PushQuery(t *testing.T) { }) // And reading each sample individually. - for i := 0; i < 10; i++ { + for i := range 10 { testcases = append(testcases, testcase{ name: fmt.Sprintf("ReadOne(%s, sample=%d)", scenario, i), numIngesters: numIngesters, @@ -1444,7 +1467,6 @@ func TestDistributor_PushQuery(t *testing.T) { } for _, tc := range testcases { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() ds, ingesters, _, _ := prepare(t, prepConfig{ @@ -1530,7 +1552,7 @@ func TestDistributor_QueryStream_ShouldReturnErrorIfMaxChunksPerQueryLimitIsReac // Push more series to exceed the limit once we'll query back all series. 
writeReq = &cortexpb.WriteRequest{} - for i := 0; i < maxChunksLimit; i++ { + for i := range maxChunksLimit { writeReq.Timeseries = append(writeReq.Timeseries, makeWriteRequestTimeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: fmt.Sprintf("another_series_%d", i)}}, 0, 0, histogram), ) @@ -1778,53 +1800,56 @@ func TestDistributor_Push_LabelRemoval(t *testing.T) { { removeReplica: true, removeLabels: []string{"cluster"}, - inputSeries: labels.Labels{ - {Name: "__name__", Value: "some_metric"}, - {Name: "cluster", Value: "one"}, - {Name: "__replica__", Value: "two"}, - }, - expectedSeries: labels.Labels{ - {Name: "__name__", Value: "some_metric"}, - }, + inputSeries: labels.FromStrings( + "__name__", "some_metric", + "cluster", "one", + "__replica__", "two", + ), + expectedSeries: labels.FromStrings( + "__name__", "some_metric", + ), }, + // Remove multiple labels and replica. { removeReplica: true, removeLabels: []string{"foo", "some"}, - inputSeries: labels.Labels{ - {Name: "__name__", Value: "some_metric"}, - {Name: "cluster", Value: "one"}, - {Name: "__replica__", Value: "two"}, - {Name: "foo", Value: "bar"}, - {Name: "some", Value: "thing"}, - }, - expectedSeries: labels.Labels{ - {Name: "__name__", Value: "some_metric"}, - {Name: "cluster", Value: "one"}, - }, + inputSeries: labels.FromStrings( + "__name__", "some_metric", + "cluster", "one", + "__replica__", "two", + "foo", "bar", + "some", "thing", + ), + expectedSeries: labels.FromStrings( + "__name__", "some_metric", + "cluster", "one", + ), }, + // Don't remove any labels. { removeReplica: false, - inputSeries: labels.Labels{ - {Name: "__name__", Value: "some_metric"}, - {Name: "__replica__", Value: "two"}, - {Name: "cluster", Value: "one"}, - }, - expectedSeries: labels.Labels{ - {Name: "__name__", Value: "some_metric"}, - {Name: "__replica__", Value: "two"}, - {Name: "cluster", Value: "one"}, - }, + inputSeries: labels.FromStrings( + "__name__", "some_metric", + "__replica__", "two", + "cluster", "one", + ), + expectedSeries: labels.FromStrings( + "__name__", "some_metric", + "__replica__", "two", + "cluster", "one", + ), }, + // No labels left. 
{ removeReplica: true, removeLabels: []string{"cluster"}, - inputSeries: labels.Labels{ - {Name: "cluster", Value: "one"}, - {Name: "__replica__", Value: "two"}, - }, + inputSeries: labels.FromStrings( + "cluster", "one", + "__replica__", "two", + ), expectedSeries: labels.Labels{}, exemplars: []cortexpb.Exemplar{ {Labels: cortexpb.FromLabelsToLabelAdapters(labels.FromStrings("test", "a")), Value: 1, TimestampMs: 0}, @@ -1897,13 +1922,9 @@ func TestDistributor_Push_LabelRemoval_RemovingNameLabelWillError(t *testing.T) } tc := testcase{ - removeReplica: true, - removeLabels: []string{"__name__"}, - inputSeries: labels.Labels{ - {Name: "__name__", Value: "some_metric"}, - {Name: "cluster", Value: "one"}, - {Name: "__replica__", Value: "two"}, - }, + removeReplica: true, + removeLabels: []string{"__name__"}, + inputSeries: labels.FromStrings("__name__", "some_metric", "cluster", "one", "__replica__", "two"), expectedSeries: labels.Labels{}, } @@ -1937,66 +1958,70 @@ func TestDistributor_Push_ShouldGuaranteeShardingTokenConsistencyOverTheTime(t * expectedToken uint32 }{ "metric_1 with value_1": { - inputSeries: labels.Labels{ - {Name: "__name__", Value: "metric_1"}, - {Name: "cluster", Value: "cluster_1"}, - {Name: "key", Value: "value_1"}, - }, - expectedSeries: labels.Labels{ - {Name: "__name__", Value: "metric_1"}, - {Name: "cluster", Value: "cluster_1"}, - {Name: "key", Value: "value_1"}, - }, + inputSeries: labels.FromStrings( + "__name__", "metric_1", + "cluster", "cluster_1", + "key", "value_1", + ), + expectedSeries: labels.FromStrings( + "__name__", "metric_1", + "cluster", "cluster_1", + "key", "value_1", + ), expectedToken: 0xec0a2e9d, }, + "metric_1 with value_1 and dropped label due to config": { - inputSeries: labels.Labels{ - {Name: "__name__", Value: "metric_1"}, - {Name: "cluster", Value: "cluster_1"}, - {Name: "key", Value: "value_1"}, - {Name: "dropped", Value: "unused"}, // will be dropped, doesn't need to be in correct order - }, - expectedSeries: labels.Labels{ - {Name: "__name__", Value: "metric_1"}, - {Name: "cluster", Value: "cluster_1"}, - {Name: "key", Value: "value_1"}, - }, + inputSeries: labels.FromStrings( + "__name__", "metric_1", + "cluster", "cluster_1", + "key", "value_1", + "dropped", "unused", + ), + expectedSeries: labels.FromStrings( + "__name__", "metric_1", + "cluster", "cluster_1", + "key", "value_1", + ), expectedToken: 0xec0a2e9d, }, + "metric_1 with value_1 and dropped HA replica label": { - inputSeries: labels.Labels{ - {Name: "__name__", Value: "metric_1"}, - {Name: "cluster", Value: "cluster_1"}, - {Name: "key", Value: "value_1"}, - {Name: "__replica__", Value: "replica_1"}, - }, - expectedSeries: labels.Labels{ - {Name: "__name__", Value: "metric_1"}, - {Name: "cluster", Value: "cluster_1"}, - {Name: "key", Value: "value_1"}, - }, + inputSeries: labels.FromStrings( + "__name__", "metric_1", + "cluster", "cluster_1", + "key", "value_1", + "__replica__", "replica_1", + ), + expectedSeries: labels.FromStrings( + "__name__", "metric_1", + "cluster", "cluster_1", + "key", "value_1", + ), expectedToken: 0xec0a2e9d, }, + "metric_2 with value_1": { - inputSeries: labels.Labels{ - {Name: "__name__", Value: "metric_2"}, - {Name: "key", Value: "value_1"}, - }, - expectedSeries: labels.Labels{ - {Name: "__name__", Value: "metric_2"}, - {Name: "key", Value: "value_1"}, - }, + inputSeries: labels.FromStrings( + "__name__", "metric_2", + "key", "value_1", + ), + expectedSeries: labels.FromStrings( + "__name__", "metric_2", + "key", "value_1", + ), 
   expectedToken: 0xa60906f2,
  },
+ "metric_1 with value_2": {
-  inputSeries: labels.Labels{
-   {Name: "__name__", Value: "metric_1"},
-   {Name: "key", Value: "value_2"},
-  },
-  expectedSeries: labels.Labels{
-   {Name: "__name__", Value: "metric_1"},
-   {Name: "key", Value: "value_2"},
-  },
+  inputSeries: labels.FromStrings(
+   "__name__", "metric_1",
+   "key", "value_2",
+  ),
+  expectedSeries: labels.FromStrings(
+   "__name__", "metric_1",
+   "key", "value_2",
+  ),
   expectedToken: 0x18abc8a2,
  },
 }
@@ -2007,7 +2032,6 @@ func TestDistributor_Push_ShouldGuaranteeShardingTokenConsistencyOverTheTime(t *
 limits.AcceptHASamples = true

 for testName, testData := range tests {
- testData := testData
 t.Run(testName, func(t *testing.T) {
  t.Parallel()
  ds, ingesters, _, _ := prepare(t, prepConfig{
@@ -2039,10 +2063,7 @@ func TestDistributor_Push_ShouldGuaranteeShardingTokenConsistencyOverTheTime(t *
 func TestDistributor_Push_LabelNameValidation(t *testing.T) {
 t.Parallel()
- inputLabels := labels.Labels{
-  {Name: model.MetricNameLabel, Value: "foo"},
-  {Name: "999.illegal", Value: "baz"},
- }
+ inputLabels := labels.FromStrings(model.MetricNameLabel, "foo", "999.illegal", "baz")
 ctx := user.InjectOrgID(context.Background(), "user")

 tests := map[string]struct {
@@ -2070,9 +2091,7 @@ func TestDistributor_Push_LabelNameValidation(t *testing.T) {
 }

 for testName, tc := range tests {
- tc := tc
 for _, histogram := range []bool{true, false} {
- histogram := histogram
 t.Run(fmt.Sprintf("%s, histogram=%s", testName, strconv.FormatBool(histogram)), func(t *testing.T) {
  t.Parallel()
  ds, _, _, _ := prepare(t, prepConfig{
@@ -2138,7 +2157,6 @@ func TestDistributor_Push_ExemplarValidation(t *testing.T) {
 }

 for testName, tc := range tests {
- tc := tc
 t.Run(testName, func(t *testing.T) {
  t.Parallel()
  ds, _, _, _ := prepare(t, prepConfig{
@@ -2208,9 +2226,8 @@ func BenchmarkDistributor_GetLabelsValues(b *testing.B) {
  lblValuesDuplicateRatio: tc.lblValuesDuplicateRatio,
 })
 b.Run(name, func(b *testing.B) {
- b.ResetTimer()
  b.ReportAllocs()
-  for i := 0; i < b.N; i++ {
+  for b.Loop() {
   _, err := ds[0].LabelValuesForLabelName(ctx, model.Time(time.Now().UnixMilli()), model.Time(time.Now().UnixMilli()), "__name__", nil, false)
   require.NoError(b, err)
  }
@@ -2235,9 +2252,9 @@ func BenchmarkDistributor_Push(b *testing.B) {
 metrics := make([]labels.Labels, numSeriesPerRequest)
 samples := make([]cortexpb.Sample, numSeriesPerRequest)

- for i := 0; i < numSeriesPerRequest; i++ {
-  lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}})
-  for i := 0; i < 10; i++ {
+ for i := range numSeriesPerRequest {
+  lbls := labels.NewBuilder(labels.FromStrings(model.MetricNameLabel, "foo"))
+  for i := range 10 {
   lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i))
  }
@@ -2261,9 +2278,9 @@ func BenchmarkDistributor_Push(b *testing.B) {
 metrics := make([]labels.Labels, numSeriesPerRequest)
 samples := make([]cortexpb.Sample, numSeriesPerRequest)

- for i := 0; i < numSeriesPerRequest; i++ {
-  lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}})
-  for i := 0; i < 10; i++ {
+ for i := range numSeriesPerRequest {
+  lbls := labels.NewBuilder(labels.FromStrings(model.MetricNameLabel, "foo"))
+  for i := range 10 {
   lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i))
  }
@@ -2286,8 +2303,8 @@ func BenchmarkDistributor_Push(b *testing.B) {
 metrics := make([]labels.Labels, numSeriesPerRequest)
 samples := make([]cortexpb.Sample, numSeriesPerRequest)

- for i := 0; i < numSeriesPerRequest; i++ {
-  lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}})
+ for i := range numSeriesPerRequest {
+  lbls := labels.NewBuilder(labels.FromStrings(model.MetricNameLabel, "foo"))
  for i := 1; i < 31; i++ {
   lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i))
  }
@@ -2311,9 +2328,9 @@ func BenchmarkDistributor_Push(b *testing.B) {
 metrics := make([]labels.Labels, numSeriesPerRequest)
 samples := make([]cortexpb.Sample, numSeriesPerRequest)

- for i := 0; i < numSeriesPerRequest; i++ {
-  lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}})
-  for i := 0; i < 10; i++ {
+ for i := range numSeriesPerRequest {
+  lbls := labels.NewBuilder(labels.FromStrings(model.MetricNameLabel, "foo"))
+  for i := range 10 {
   lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i))
  }
@@ -2339,9 +2356,9 @@ func BenchmarkDistributor_Push(b *testing.B) {
 metrics := make([]labels.Labels, numSeriesPerRequest)
 samples := make([]cortexpb.Sample, numSeriesPerRequest)

- for i := 0; i < numSeriesPerRequest; i++ {
-  lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}})
-  for i := 0; i < 10; i++ {
+ for i := range numSeriesPerRequest {
+  lbls := labels.NewBuilder(labels.FromStrings(model.MetricNameLabel, "foo"))
+  for i := range 10 {
   lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i))
  }
@@ -2367,9 +2384,9 @@ func BenchmarkDistributor_Push(b *testing.B) {
 metrics := make([]labels.Labels, numSeriesPerRequest)
 samples := make([]cortexpb.Sample, numSeriesPerRequest)

- for i := 0; i < numSeriesPerRequest; i++ {
-  lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}})
-  for i := 0; i < 10; i++ {
+ for i := range numSeriesPerRequest {
+  lbls := labels.NewBuilder(labels.FromStrings(model.MetricNameLabel, "foo"))
+  for i := range 10 {
   lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i))
  }
@@ -2396,9 +2413,9 @@ func BenchmarkDistributor_Push(b *testing.B) {
 metrics := make([]labels.Labels, numSeriesPerRequest)
 samples := make([]cortexpb.Sample, numSeriesPerRequest)

- for i := 0; i < numSeriesPerRequest; i++ {
-  lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}})
-  for i := 0; i < 10; i++ {
+ for i := range numSeriesPerRequest {
+  lbls := labels.NewBuilder(labels.FromStrings(model.MetricNameLabel, "foo"))
+  for i := range 10 {
   lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i))
  }
@@ -2421,9 +2438,9 @@ func BenchmarkDistributor_Push(b *testing.B) {
 metrics := make([]labels.Labels, numSeriesPerRequest)
 samples := make([]cortexpb.Sample, numSeriesPerRequest)

- for i := 0; i < numSeriesPerRequest; i++ {
-  lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}})
-  for i := 0; i < 10; i++ {
+ for i := range numSeriesPerRequest {
+  lbls := labels.NewBuilder(labels.FromStrings(model.MetricNameLabel, "foo"))
+  for i := range 10 {
   lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i))
  }
@@ -2450,7 +2467,7 @@ func BenchmarkDistributor_Push(b *testing.B) {
 b.Cleanup(func() { assert.NoError(b, closer.Close()) })

 err := kvStore.CAS(context.Background(), ingester.RingKey,
-  func(_ interface{}) (interface{}, bool, error) {
+  func(_ any) (any, bool, error) {
   d := &ring.Desc{}
   d.AddIngester("ingester-1", "127.0.0.1", "", tg.GenerateTokens(d, "ingester-1", "", 128, true), ring.ACTIVE, time.Now())
   return d, true, nil
@@ -2469,7 +2486,7 @@ func BenchmarkDistributor_Push(b *testing.B) {
  require.NoError(b, services.StopAndAwaitTerminated(context.Background(), ingestersRing))
 })

- test.Poll(b, time.Second, 1, func() interface{} {
+ test.Poll(b, time.Second, 1, func() any {
  return ingestersRing.InstancesCount()
 })
@@ -2504,9 +2521,8 @@ func BenchmarkDistributor_Push(b *testing.B) {
 // Run the benchmark.
 b.ReportAllocs()
- b.ResetTimer()

- for n := 0; n < b.N; n++ {
+ for b.Loop() {
  _, err := distributor.Push(ctx, cortexpb.ToWriteRequest(metrics, samples, nil, nil, cortexpb.API))

  if testData.expectedErr == "" && err != nil {
   b.Fatalf("no error expected but got %v", err)
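The benchmark hunks above drop b.ResetTimer and the classic b.N counter in favor of testing.B.Loop, added in Go 1.24; setup done before the first call to Loop is excluded from the timing automatically. A minimal, self-contained sketch of the pattern (the sum function is illustrative and stands in for distributor.Push; it is not part of this patch):

package bench

import "testing"

// sum is a placeholder for the real work being measured.
func sum(xs []int) (total int) {
	for _, x := range xs {
		total += x
	}
	return total
}

func BenchmarkSum(b *testing.B) {
	xs := make([]int, 1024) // setup; not timed, so no b.ResetTimer is needed
	b.ReportAllocs()
	for b.Loop() { // replaces: for n := 0; n < b.N; n++
		sum(xs)
	}
}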
fmt.Sprintf("foo_%d", i)}}) - for i := 0; i < 10; i++ { + for i := range numSeriesPerRequest { + lbls := labels.NewBuilder(labels.FromStrings(model.MetricNameLabel, fmt.Sprintf("foo_%d", i))) + for i := range 10 { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i)) } @@ -2847,9 +2882,8 @@ func BenchmarkDistributor_MetricsForLabelMatchers(b *testing.B) { // Run the benchmark. b.ReportAllocs() - b.ResetTimer() - for n := 0; n < b.N; n++ { + for b.Loop() { now := model.Now() metrics, err := ds[0].MetricsForLabelMatchers(ctx, now, now, nil, false, testData.matchers...) @@ -2897,7 +2931,6 @@ func TestDistributor_MetricsMetadata(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() // Create distributor @@ -3042,7 +3075,7 @@ func prepare(tb testing.TB, cfg prepConfig) ([]*Distributor, []*mockIngester, [] tb.Cleanup(func() { assert.NoError(tb, closer.Close()) }) err := kvStore.CAS(context.Background(), ingester.RingKey, - func(_ interface{}) (interface{}, bool, error) { + func(_ any) (any, bool, error) { return &ring.Desc{ Ingesters: ingesterDescs, }, true, nil @@ -3066,7 +3099,7 @@ func prepare(tb testing.TB, cfg prepConfig) ([]*Distributor, []*mockIngester, [] require.NoError(tb, err) require.NoError(tb, services.StartAndAwaitRunning(context.Background(), ingestersRing)) - test.Poll(tb, time.Second, cfg.numIngesters, func() interface{} { + test.Poll(tb, time.Second, cfg.numIngesters, func() any { return ingestersRing.InstancesCount() }) @@ -3134,7 +3167,7 @@ func prepare(tb testing.TB, cfg prepConfig) ([]*Distributor, []*mockIngester, [] // If the distributors ring is setup, wait until the first distributor // updates to the expected size if distributors[0].distributorsRing != nil { - test.Poll(tb, time.Second, cfg.numDistributors, func() interface{} { + test.Poll(tb, time.Second, cfg.numDistributors, func() any { return distributors[0].distributorsLifeCycler.HealthyInstancesCount() }) } @@ -3155,7 +3188,7 @@ func stopAll(ds []*Distributor, r *ring.Ring) { func makeWriteRequest(startTimestampMs int64, samples int, metadata int, histograms int) *cortexpb.WriteRequest { request := &cortexpb.WriteRequest{} - for i := 0; i < samples; i++ { + for i := range samples { request.Timeseries = append(request.Timeseries, makeWriteRequestTimeseries( []cortexpb.LabelAdapter{ {Name: model.MetricNameLabel, Value: "foo"}, @@ -3164,7 +3197,7 @@ func makeWriteRequest(startTimestampMs int64, samples int, metadata int, histogr }, startTimestampMs+int64(i), int64(i), false)) } - for i := 0; i < histograms; i++ { + for i := range histograms { request.Timeseries = append(request.Timeseries, makeWriteRequestTimeseries( []cortexpb.LabelAdapter{ {Name: model.MetricNameLabel, Value: "foo"}, @@ -3173,7 +3206,7 @@ func makeWriteRequest(startTimestampMs int64, samples int, metadata int, histogr }, startTimestampMs+int64(i), int64(i), true)) } - for i := 0; i < metadata; i++ { + for i := range metadata { m := &cortexpb.MetricMetadata{ MetricFamilyName: fmt.Sprintf("metric_%d", i), Type: cortexpb.COUNTER, @@ -3204,7 +3237,7 @@ func makeWriteRequestTimeseries(labels []cortexpb.LabelAdapter, ts, value int64, func makeWriteRequestHA(samples int, replica, cluster string, histogram bool) *cortexpb.WriteRequest { request := &cortexpb.WriteRequest{} - for i := 0; i < samples; i++ { + for i := range samples { ts := cortexpb.PreallocTimeseries{ TimeSeries: &cortexpb.TimeSeries{ Labels: []cortexpb.LabelAdapter{ @@ -3319,7 +3352,7 @@ func 
makeWriteRequestHAMixedSamples(samples int, histogram bool) *cortexpb.Write } } else { var s = make([]cortexpb.Sample, 0) - for i := 0; i < samples; i++ { + for i := range samples { sample := cortexpb.Sample{ Value: float64(i), TimestampMs: int64(i), @@ -3425,9 +3458,7 @@ func (i *mockIngester) series() map[uint32]*cortexpb.PreallocTimeseries { defer i.Unlock() result := map[uint32]*cortexpb.PreallocTimeseries{} - for k, v := range i.timeseries { - result[k] = v - } + maps.Copy(result, i.timeseries) return result } @@ -3789,7 +3820,9 @@ func TestDistributorValidation(t *testing.T) { // Test validation passes. { metadata: []*cortexpb.MetricMetadata{{MetricFamilyName: "testmetric", Help: "a test metric.", Unit: "", Type: cortexpb.COUNTER}}, - labels: []labels.Labels{{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}}}, + labels: []labels.Labels{ + labels.FromStrings(labels.MetricName, "testmetric", "foo", "bar"), + }, samples: []cortexpb.Sample{{ TimestampMs: int64(now), Value: 1, @@ -3800,7 +3833,9 @@ func TestDistributorValidation(t *testing.T) { }, // Test validation fails for very old samples. { - labels: []labels.Labels{{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}}}, + labels: []labels.Labels{ + labels.FromStrings(labels.MetricName, "testmetric", "foo", "bar"), + }, samples: []cortexpb.Sample{{ TimestampMs: int64(past), Value: 2, @@ -3809,7 +3844,9 @@ func TestDistributorValidation(t *testing.T) { }, // Test validation fails for samples from the future. { - labels: []labels.Labels{{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}}}, + labels: []labels.Labels{ + labels.FromStrings(labels.MetricName, "testmetric", "foo", "bar"), + }, samples: []cortexpb.Sample{{ TimestampMs: int64(future), Value: 4, @@ -3819,7 +3856,9 @@ func TestDistributorValidation(t *testing.T) { // Test maximum labels names per series. { - labels: []labels.Labels{{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}, {Name: "foo2", Value: "bar2"}}}, + labels: []labels.Labels{ + labels.FromStrings(labels.MetricName, "testmetric", "foo", "bar", "foo2", "bar2"), + }, samples: []cortexpb.Sample{{ TimestampMs: int64(now), Value: 2, @@ -3829,8 +3868,8 @@ func TestDistributorValidation(t *testing.T) { // Test multiple validation fails return the first one. { labels: []labels.Labels{ - {{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}, {Name: "foo2", Value: "bar2"}}, - {{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}}, + labels.FromStrings(labels.MetricName, "testmetric", "foo", "bar", "foo2", "bar2"), + labels.FromStrings(labels.MetricName, "testmetric", "foo", "bar"), }, samples: []cortexpb.Sample{ {TimestampMs: int64(now), Value: 2}, @@ -3841,7 +3880,9 @@ func TestDistributorValidation(t *testing.T) { // Test metadata validation fails { metadata: []*cortexpb.MetricMetadata{{MetricFamilyName: "", Help: "a test metric.", Unit: "", Type: cortexpb.COUNTER}}, - labels: []labels.Labels{{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}}}, + labels: []labels.Labels{ + labels.FromStrings(labels.MetricName, "testmetric", "foo", "bar"), + }, samples: []cortexpb.Sample{{ TimestampMs: int64(now), Value: 1, @@ -3850,7 +3891,9 @@ func TestDistributorValidation(t *testing.T) { }, // Test maximum labels names per series for histogram samples. 
{ - labels: []labels.Labels{{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}, {Name: "foo2", Value: "bar2"}}}, + labels: []labels.Labels{ + labels.FromStrings(labels.MetricName, "testmetric", "foo", "bar", "foo2", "bar2"), + }, histograms: []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(int64(now), testHistogram), }, @@ -3858,7 +3901,9 @@ func TestDistributorValidation(t *testing.T) { }, // Test validation fails for very old histogram samples. { - labels: []labels.Labels{{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}}}, + labels: []labels.Labels{ + labels.FromStrings(labels.MetricName, "testmetric", "foo", "bar"), + }, histograms: []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(int64(past), testHistogram), }, @@ -3866,14 +3911,15 @@ func TestDistributorValidation(t *testing.T) { }, // Test validation fails for histogram samples from the future. { - labels: []labels.Labels{{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}}}, + labels: []labels.Labels{ + labels.FromStrings(labels.MetricName, "testmetric", "foo", "bar"), + }, histograms: []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(int64(future), testFloatHistogram), }, err: httpgrpc.Errorf(http.StatusBadRequest, `timestamp too new: %d metric: "testmetric"`, future), }, } { - tc := tc t.Run(strconv.Itoa(i), func(t *testing.T) { t.Parallel() var limits validation.Limits @@ -4004,28 +4050,16 @@ func TestDistributor_Push_Relabel(t *testing.T) { { name: "with no relabel config", inputSeries: []labels.Labels{ - { - {Name: "__name__", Value: "foo"}, - {Name: "cluster", Value: "one"}, - }, - }, - expectedSeries: labels.Labels{ - {Name: "__name__", Value: "foo"}, - {Name: "cluster", Value: "one"}, + labels.FromStrings("__name__", "foo", "cluster", "one"), }, + expectedSeries: labels.FromStrings("__name__", "foo", "cluster", "one"), }, { name: "with hardcoded replace", inputSeries: []labels.Labels{ - { - {Name: "__name__", Value: "foo"}, - {Name: "cluster", Value: "one"}, - }, - }, - expectedSeries: labels.Labels{ - {Name: "__name__", Value: "foo"}, - {Name: "cluster", Value: "two"}, + labels.FromStrings("__name__", "foo", "cluster", "one"), }, + expectedSeries: labels.FromStrings("__name__", "foo", "cluster", "two"), metricRelabelConfigs: []*relabel.Config{ { SourceLabels: []model.LabelName{"cluster"}, @@ -4039,19 +4073,10 @@ func TestDistributor_Push_Relabel(t *testing.T) { { name: "with drop action", inputSeries: []labels.Labels{ - { - {Name: "__name__", Value: "foo"}, - {Name: "cluster", Value: "one"}, - }, - { - {Name: "__name__", Value: "bar"}, - {Name: "cluster", Value: "two"}, - }, - }, - expectedSeries: labels.Labels{ - {Name: "__name__", Value: "bar"}, - {Name: "cluster", Value: "two"}, + labels.FromStrings("__name__", "foo", "cluster", "one"), + labels.FromStrings("__name__", "bar", "cluster", "two"), }, + expectedSeries: labels.FromStrings("__name__", "bar", "cluster", "two"), metricRelabelConfigs: []*relabel.Config{ { SourceLabels: []model.LabelName{"__name__"}, @@ -4063,9 +4088,7 @@ func TestDistributor_Push_Relabel(t *testing.T) { } for _, tc := range cases { - tc := tc for _, enableHistogram := range []bool{false, true} { - enableHistogram := enableHistogram t.Run(fmt.Sprintf("%s, histogram=%s", tc.name, strconv.FormatBool(enableHistogram)), func(t *testing.T) { t.Parallel() var err error @@ -4113,24 +4136,14 @@ func TestDistributor_Push_EmptyLabel(t *testing.T) { { name: "with empty label", inputSeries: 
[]labels.Labels{ - { //Token 1106054332 without filtering - {Name: "__name__", Value: "foo"}, - {Name: "empty", Value: ""}, - }, - { //Token 3827924124 without filtering - {Name: "__name__", Value: "foo"}, - {Name: "changHash", Value: ""}, - }, - }, - expectedSeries: labels.Labels{ - //Token 1797290973 - {Name: "__name__", Value: "foo"}, + labels.FromStrings("__name__", "foo", "empty", ""), + labels.FromStrings("__name__", "foo", "changHash", ""), }, + expectedSeries: labels.FromStrings("__name__", "foo"), }, } for _, tc := range cases { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() var err error @@ -4191,14 +4204,8 @@ func TestDistributor_Push_RelabelDropWillExportMetricOfDroppedSamples(t *testing } inputSeries := []labels.Labels{ - { - {Name: "__name__", Value: "foo"}, - {Name: "cluster", Value: "one"}, - }, - { - {Name: "__name__", Value: "bar"}, - {Name: "cluster", Value: "two"}, - }, + labels.FromStrings("__name__", "foo", "cluster", "one"), + labels.FromStrings("__name__", "bar", "cluster", "two"), } var err error @@ -4248,22 +4255,10 @@ func TestDistributor_Push_RelabelDropWillExportMetricOfDroppedSamples(t *testing func TestDistributor_PushLabelSetMetrics(t *testing.T) { t.Parallel() inputSeries := []labels.Labels{ - { - {Name: "__name__", Value: "foo"}, - {Name: "cluster", Value: "one"}, - }, - { - {Name: "__name__", Value: "bar"}, - {Name: "cluster", Value: "one"}, - }, - { - {Name: "__name__", Value: "bar"}, - {Name: "cluster", Value: "two"}, - }, - { - {Name: "__name__", Value: "foo"}, - {Name: "cluster", Value: "three"}, - }, + labels.FromStrings("__name__", "foo", "cluster", "one"), + labels.FromStrings("__name__", "bar", "cluster", "one"), + labels.FromStrings("__name__", "bar", "cluster", "two"), + labels.FromStrings("__name__", "foo", "cluster", "three"), } var err error @@ -4301,14 +4296,8 @@ func TestDistributor_PushLabelSetMetrics(t *testing.T) { // Push more series. inputSeries = []labels.Labels{ - { - {Name: "__name__", Value: "baz"}, - {Name: "cluster", Value: "two"}, - }, - { - {Name: "__name__", Value: "foo"}, - {Name: "cluster", Value: "four"}, - }, + labels.FromStrings("__name__", "baz", "cluster", "two"), + labels.FromStrings("__name__", "foo", "cluster", "four"), } // Write the same request twice for different users. 
req = mockWriteRequest(inputSeries, 1, 1, false) @@ -4356,7 +4345,7 @@ func TestDistributor_PushLabelSetMetrics(t *testing.T) { func countMockIngestersCalls(ingesters []*mockIngester, name string) int { count := 0 - for i := 0; i < len(ingesters); i++ { + for i := range ingesters { if ingesters[i].countCalls(name) > 0 { count++ } diff --git a/pkg/distributor/ingestion_rate_strategy_test.go b/pkg/distributor/ingestion_rate_strategy_test.go index 84152bd824..fd8ea0d362 100644 --- a/pkg/distributor/ingestion_rate_strategy_test.go +++ b/pkg/distributor/ingestion_rate_strategy_test.go @@ -89,7 +89,6 @@ func TestIngestionRateStrategy(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() diff --git a/pkg/distributor/query.go b/pkg/distributor/query.go index 9645a65672..3b44d895bc 100644 --- a/pkg/distributor/query.go +++ b/pkg/distributor/query.go @@ -162,7 +162,7 @@ func mergeExemplarSets(a, b []cortexpb.Exemplar) []cortexpb.Exemplar { func (d *Distributor) queryIngestersExemplars(ctx context.Context, replicationSet ring.ReplicationSet, req *ingester_client.ExemplarQueryRequest) (*ingester_client.ExemplarQueryResponse, error) { // Fetch exemplars from multiple ingesters in parallel, using the replicationSet // to deal with consistency. - results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, false, false, func(ctx context.Context, ing *ring.InstanceDesc) (interface{}, error) { + results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, false, false, func(ctx context.Context, ing *ring.InstanceDesc) (any, error) { client, err := d.ingesterPool.GetClientFor(ing.Addr) if err != nil { return nil, err @@ -190,7 +190,7 @@ func (d *Distributor) queryIngestersExemplars(ctx context.Context, replicationSe return mergeExemplarQueryResponses(results), nil } -func mergeExemplarQueryResponses(results []interface{}) *ingester_client.ExemplarQueryResponse { +func mergeExemplarQueryResponses(results []any) *ingester_client.ExemplarQueryResponse { var keys []string exemplarResults := make(map[string]cortexpb.TimeSeries) buf := make([]byte, 0, 1024) @@ -229,7 +229,7 @@ func (d *Distributor) queryIngesterStream(ctx context.Context, replicationSet ri ) // Fetch samples from multiple ingesters - results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, false, partialDataEnabled, func(ctx context.Context, ing *ring.InstanceDesc) (interface{}, error) { + results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, false, partialDataEnabled, func(ctx context.Context, ing *ring.InstanceDesc) (any, error) { client, err := d.ingesterPool.GetClientFor(ing.Addr) if err != nil { return nil, err diff --git a/pkg/distributor/query_test.go b/pkg/distributor/query_test.go index 5c9d35073b..384cd849a7 100644 --- a/pkg/distributor/query_test.go +++ b/pkg/distributor/query_test.go @@ -73,16 +73,15 @@ func TestMergeExemplars(t *testing.T) { {Labels: labels2, Exemplars: []cortexpb.Exemplar{exemplar3, exemplar4}}}, }, } { - c := c t.Run(fmt.Sprint("test", i), func(t *testing.T) { t.Parallel() rA := &ingester_client.ExemplarQueryResponse{Timeseries: c.seriesA} rB := &ingester_client.ExemplarQueryResponse{Timeseries: c.seriesB} - e := mergeExemplarQueryResponses([]interface{}{rA, rB}) + e := mergeExemplarQueryResponses([]any{rA, rB}) require.Equal(t, c.expected, e.Timeseries) if !c.nonReversible { // Check the other way round too - e = mergeExemplarQueryResponses([]interface{}{rB, rA}) + e = mergeExemplarQueryResponses([]any{rB, rA}) 
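Two mechanical rewrites dominate the distributor test changes. First, the `testData := testData` / `tc := tc` copies are deleted because, since Go 1.22, each loop iteration gets its own variable, so capturing it in a parallel subtest no longer needs a copy. Second, labels.Labels composite literals become labels.FromStrings, which keeps the tests independent of how Labels is represented internally (the representation varies with Prometheus's label build tags) and sorts the name/value pairs. A small illustrative sketch of the second idiom, not taken from the patch:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	// Arguments are name, value, name, value, ... and are sorted by name,
	// so the literal's ordering does not affect hashing or equality.
	ls := labels.FromStrings("__name__", "metric_1", "key", "value_2")
	fmt.Println(ls.Get("key")) // value_2
}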
diff --git a/pkg/distributor/ingestion_rate_strategy_test.go b/pkg/distributor/ingestion_rate_strategy_test.go
index 84152bd824..fd8ea0d362 100644
--- a/pkg/distributor/ingestion_rate_strategy_test.go
+++ b/pkg/distributor/ingestion_rate_strategy_test.go
@@ -89,7 +89,6 @@ func TestIngestionRateStrategy(t *testing.T) {
 }

 for testName, testData := range tests {
- testData := testData
 t.Run(testName, func(t *testing.T) {
  t.Parallel()

diff --git a/pkg/distributor/query.go b/pkg/distributor/query.go
index 9645a65672..3b44d895bc 100644
--- a/pkg/distributor/query.go
+++ b/pkg/distributor/query.go
@@ -162,7 +162,7 @@ func mergeExemplarSets(a, b []cortexpb.Exemplar) []cortexpb.Exemplar {
 func (d *Distributor) queryIngestersExemplars(ctx context.Context, replicationSet ring.ReplicationSet, req *ingester_client.ExemplarQueryRequest) (*ingester_client.ExemplarQueryResponse, error) {
 // Fetch exemplars from multiple ingesters in parallel, using the replicationSet
 // to deal with consistency.
- results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, false, false, func(ctx context.Context, ing *ring.InstanceDesc) (interface{}, error) {
+ results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, false, false, func(ctx context.Context, ing *ring.InstanceDesc) (any, error) {
  client, err := d.ingesterPool.GetClientFor(ing.Addr)
  if err != nil {
   return nil, err
@@ -190,7 +190,7 @@ func (d *Distributor) queryIngestersExemplars(ctx context.Context, replicationSe
 return mergeExemplarQueryResponses(results), nil
 }

-func mergeExemplarQueryResponses(results []interface{}) *ingester_client.ExemplarQueryResponse {
+func mergeExemplarQueryResponses(results []any) *ingester_client.ExemplarQueryResponse {
 var keys []string
 exemplarResults := make(map[string]cortexpb.TimeSeries)
 buf := make([]byte, 0, 1024)
@@ -229,7 +229,7 @@ func (d *Distributor) queryIngesterStream(ctx context.Context, replicationSet ri
 )

 // Fetch samples from multiple ingesters
- results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, false, partialDataEnabled, func(ctx context.Context, ing *ring.InstanceDesc) (interface{}, error) {
+ results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, false, partialDataEnabled, func(ctx context.Context, ing *ring.InstanceDesc) (any, error) {
  client, err := d.ingesterPool.GetClientFor(ing.Addr)
  if err != nil {
   return nil, err
diff --git a/pkg/distributor/query_test.go b/pkg/distributor/query_test.go
index 5c9d35073b..384cd849a7 100644
--- a/pkg/distributor/query_test.go
+++ b/pkg/distributor/query_test.go
@@ -73,16 +73,15 @@ func TestMergeExemplars(t *testing.T) {
  {Labels: labels2, Exemplars: []cortexpb.Exemplar{exemplar3, exemplar4}}},
 },
 } {
- c := c
 t.Run(fmt.Sprint("test", i), func(t *testing.T) {
  t.Parallel()
  rA := &ingester_client.ExemplarQueryResponse{Timeseries: c.seriesA}
  rB := &ingester_client.ExemplarQueryResponse{Timeseries: c.seriesB}
- e := mergeExemplarQueryResponses([]interface{}{rA, rB})
+ e := mergeExemplarQueryResponses([]any{rA, rB})
  require.Equal(t, c.expected, e.Timeseries)
  if !c.nonReversible { // Check the other way round too
-  e = mergeExemplarQueryResponses([]interface{}{rB, rA})
+  e = mergeExemplarQueryResponses([]any{rB, rA})
   require.Equal(t, c.expected, e.Timeseries)
  }
 })
diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go
index be22e4573a..8b02607a09 100644
--- a/pkg/engine/engine.go
+++ b/pkg/engine/engine.go
@@ -10,6 +10,7 @@ import (
 "github.com/prometheus/prometheus/promql"
 "github.com/prometheus/prometheus/storage"
 thanosengine "github.com/thanos-io/promql-engine/engine"
+ "github.com/thanos-io/promql-engine/logicalplan"
 )

 type engineKeyType struct{}
@@ -43,6 +44,12 @@ func GetEngineType(ctx context.Context) Type {
 return None
 }

+type QueryEngine interface {
+ promql.QueryEngine
+ MakeInstantQueryFromPlan(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, root logicalplan.Node, ts time.Time, qs string) (promql.Query, error)
+ MakeRangeQueryFromPlan(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, root logicalplan.Node, start time.Time, end time.Time, interval time.Duration, qs string) (promql.Query, error)
+}
+
 type Engine struct {
 prometheusEngine *promql.Engine
 thanosEngine *thanosengine.Engine
@@ -127,6 +134,53 @@ prom:
 return qf.prometheusEngine.NewRangeQuery(ctx, q, opts, qs, start, end, interval)
 }

+func (qf *Engine) MakeInstantQueryFromPlan(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, root logicalplan.Node, ts time.Time, qs string) (promql.Query, error) {
+ if engineType := GetEngineType(ctx); engineType == Prometheus {
+  qf.engineSwitchQueriesTotal.WithLabelValues(string(Prometheus)).Inc()
+ } else if engineType == Thanos {
+  qf.engineSwitchQueriesTotal.WithLabelValues(string(Thanos)).Inc()
+ }
+
+ if qf.thanosEngine != nil {
+  res, err := qf.thanosEngine.MakeInstantQueryFromPlan(ctx, q, fromPromQLOpts(opts), root, ts)
+  if err != nil {
+   if thanosengine.IsUnimplemented(err) {
+    // fallback to use prometheus engine
+    qf.fallbackQueriesTotal.Inc()
+    goto prom
+   }
+   return nil, err
+  }
+  return res, nil
+ }
+
+prom:
+ return qf.prometheusEngine.NewInstantQuery(ctx, q, opts, qs, ts)
+}
+
+func (qf *Engine) MakeRangeQueryFromPlan(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, root logicalplan.Node, start time.Time, end time.Time, interval time.Duration, qs string) (promql.Query, error) {
+ if engineType := GetEngineType(ctx); engineType == Prometheus {
+  qf.engineSwitchQueriesTotal.WithLabelValues(string(Prometheus)).Inc()
+ } else if engineType == Thanos {
+  qf.engineSwitchQueriesTotal.WithLabelValues(string(Thanos)).Inc()
+ }
+ if qf.thanosEngine != nil {
+  res, err := qf.thanosEngine.MakeRangeQueryFromPlan(ctx, q, fromPromQLOpts(opts), root, start, end, interval)
+  if err != nil {
+   if thanosengine.IsUnimplemented(err) {
+    // fallback to use prometheus engine
+    qf.fallbackQueriesTotal.Inc()
+    goto prom
+   }
+   return nil, err
+  }
+  return res, nil
+ }
+
+prom:
+ return qf.prometheusEngine.NewRangeQuery(ctx, q, opts, qs, start, end, interval)
+}
+
 func fromPromQLOpts(opts promql.QueryOpts) *thanosengine.QueryOpts {
 if opts == nil {
  return &thanosengine.QueryOpts{}
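engine.go now defines a QueryEngine interface: the usual promql.QueryEngine plus two constructors that start from an already-built logical plan instead of a query string, with the string retained only for the Prometheus fallback path. A hedged sketch of how calling code might depend on the interface rather than on the concrete *Engine (the helper name and wiring are assumptions, not part of this patch):

import (
	"context"
	"time"

	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/storage"
	"github.com/thanos-io/promql-engine/logicalplan"

	"github.com/cortexproject/cortex/pkg/engine"
)

// runInstant accepts any engine.QueryEngine, so tests can swap implementations.
func runInstant(ctx context.Context, qe engine.QueryEngine, q storage.Queryable,
	root logicalplan.Node, ts time.Time, qs string) (*promql.Result, error) {
	// Per the diff above, this executes the plan on the Thanos engine when it
	// is enabled, and otherwise falls back to NewInstantQuery with qs.
	query, err := qe.MakeInstantQueryFromPlan(ctx, q, nil, root, ts, qs)
	if err != nil {
		return nil, err
	}
	defer query.Close()
	return query.Exec(ctx), nil
}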
diff --git a/pkg/engine/engine_test.go b/pkg/engine/engine_test.go
index 7b270e6604..db00be1267 100644
--- a/pkg/engine/engine_test.go
+++ b/pkg/engine/engine_test.go
@@ -14,8 +14,11 @@ import (
 "github.com/prometheus/prometheus/promql"
 "github.com/prometheus/prometheus/promql/parser"
 "github.com/prometheus/prometheus/promql/promqltest"
- "github.com/stretchr/testify/require"
 "github.com/thanos-io/promql-engine/execution/parse"
+ "github.com/thanos-io/promql-engine/logicalplan"
+ "github.com/thanos-io/promql-engine/query"
+
+ "github.com/stretchr/testify/require"

 utillog "github.com/cortexproject/cortex/pkg/util/log"
 )
@@ -123,3 +126,98 @@ func TestEngine_XFunctions(t *testing.T) {
  })
 }
 }
+
+func TestEngine_With_Logical_Plan(t *testing.T) {
+ ctx := context.Background()
+ reg := prometheus.NewRegistry()
+
+ now := time.Now()
+ start := time.Now().Add(-time.Minute * 5)
+ step := time.Minute
+ queryable := promqltest.LoadedStorage(t, "")
+ opts := promql.EngineOpts{
+  Logger: utillog.GoKitLogToSlog(log.NewNopLogger()),
+  Reg: reg,
+ }
+ queryEngine := New(opts, ThanosEngineConfig{Enabled: true}, reg)
+
+ range_lp := createTestLogicalPlan(t, start, now, step, "up")
+ instant_lp := createTestLogicalPlan(t, now, now, 0, "up")
+
+ r := &http.Request{Header: http.Header{}}
+ r.Header.Set(TypeHeader, string(Thanos))
+ ctx = AddEngineTypeToContext(ctx, r)
+
+ // Case 1: Executing logical plan with thanos engine
+ _, _ = queryEngine.MakeInstantQueryFromPlan(ctx, queryable, nil, instant_lp.Root(), now, "up")
+ require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(`
+  # HELP cortex_thanos_engine_fallback_queries_total Total number of fallback queries due to not implementation in thanos engine
+  # TYPE cortex_thanos_engine_fallback_queries_total counter
+  cortex_thanos_engine_fallback_queries_total 0
+ `), "cortex_thanos_engine_fallback_queries_total"))
+
+ _, _ = queryEngine.MakeRangeQueryFromPlan(ctx, queryable, nil, range_lp.Root(), start, now, step, "up")
+ require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(`
+  # HELP cortex_thanos_engine_fallback_queries_total Total number of fallback queries due to not implementation in thanos engine
+  # TYPE cortex_thanos_engine_fallback_queries_total counter
+  cortex_thanos_engine_fallback_queries_total 0
+ `), "cortex_thanos_engine_fallback_queries_total"))
+
+ // Case 2: Logical plan that thanos engine cannot execute (so it will fall back to prometheus engine)
+ err_range_lp := createTestLogicalPlan(t, start, now, step, "up[10]")
+ _, _ = queryEngine.MakeRangeQueryFromPlan(ctx, queryable, nil, err_range_lp.Root(), start, now, step, "up")
+ require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(`
+  # HELP cortex_thanos_engine_fallback_queries_total Total number of fallback queries due to not implementation in thanos engine
+  # TYPE cortex_thanos_engine_fallback_queries_total counter
+  cortex_thanos_engine_fallback_queries_total 1
+ `), "cortex_thanos_engine_fallback_queries_total"))
+
+ // Case 3: executing with prometheus engine
+ r.Header.Set(TypeHeader, string(Prometheus))
+ ctx = AddEngineTypeToContext(ctx, r)
+
+ _, _ = queryEngine.MakeInstantQueryFromPlan(ctx, queryable, nil, instant_lp.Root(), now, "up")
+ require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(`
+  # HELP cortex_engine_switch_queries_total Total number of queries where engine_type is set explicitly
+  # TYPE cortex_engine_switch_queries_total counter
+  cortex_engine_switch_queries_total{engine_type="prometheus"} 1
+  cortex_engine_switch_queries_total{engine_type="thanos"} 3
+ `), "cortex_engine_switch_queries_total"))
+
+ _, _ = queryEngine.MakeRangeQueryFromPlan(ctx, queryable, nil, range_lp.Root(), start, now, step, "up")
+ require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(`
+  # HELP cortex_engine_switch_queries_total Total number of queries where engine_type is set explicitly
+  # TYPE cortex_engine_switch_queries_total counter
+  cortex_engine_switch_queries_total{engine_type="prometheus"} 2
+  cortex_engine_switch_queries_total{engine_type="thanos"} 3
+ `), "cortex_engine_switch_queries_total"))
+}
+
+func createTestLogicalPlan(t *testing.T, startTime time.Time, endTime time.Time, step time.Duration, q string) logicalplan.Plan {
+
+ qOpts := query.Options{
+  Start: startTime,
+  End: startTime,
+  Step: 0,
+  StepsBatch: 10,
+  LookbackDelta: 0,
+  EnablePerStepStats: false,
+ }
+
+ if step != 0 {
+  qOpts.End = endTime
+  qOpts.Step = step
+ }
+
+ expr, err := parser.NewParser(q, parser.WithFunctions(parser.Functions)).ParseExpr()
+ require.NoError(t, err)
+
+ planOpts := logicalplan.PlanOptions{
+  DisableDuplicateLabelCheck: false,
+ }
+
+ logicalPlan, err := logicalplan.NewFromAST(expr, &qOpts, planOpts)
+ require.NoError(t, err)
+
+ return logicalPlan
+}
diff --git a/pkg/frontend/config.go b/pkg/frontend/config.go
index a1109f213a..03dff13980 100644
--- a/pkg/frontend/config.go
+++ b/pkg/frontend/config.go
@@ -20,8 +20,7 @@ type CombinedFrontendConfig struct {
 FrontendV1 v1.Config `yaml:",inline"`
 FrontendV2 v2.Config `yaml:",inline"`

- DownstreamURL string `yaml:"downstream_url"`
- DistributedExecEnabled bool `yaml:"distributed_exec_enabled" doc:"hidden"`
+ DownstreamURL string `yaml:"downstream_url"`
 }

 func (cfg *CombinedFrontendConfig) RegisterFlags(f *flag.FlagSet) {
@@ -30,7 +29,6 @@ func (cfg *CombinedFrontendConfig) RegisterFlags(f *flag.FlagSet) {
 cfg.FrontendV2.RegisterFlags(f)

 f.StringVar(&cfg.DownstreamURL, "frontend.downstream-url", "", "URL of downstream Prometheus.")
- f.BoolVar(&cfg.DistributedExecEnabled, "frontend.distributed-exec-enabled", false, "Experimental: Enables distributed execution of queries by passing logical query plan fragments to downstream components.")
 }

 // InitFrontend initializes frontend (either V1 -- without scheduler, or V2 -- with scheduler) or no frontend at
diff --git a/pkg/frontend/transport/handler.go b/pkg/frontend/transport/handler.go
index 9001560b52..f35bab20ff 100644
--- a/pkg/frontend/transport/handler.go
+++ b/pkg/frontend/transport/handler.go
@@ -7,6 +7,7 @@ import (
 "flag"
 "fmt"
 "io"
+ "maps"
 "net/http"
 "net/url"
 "strconv"
@@ -22,6 +23,8 @@ import (
 "github.com/weaveworks/common/httpgrpc"
 "google.golang.org/grpc/status"

+ "github.com/cortexproject/cortex/pkg/engine"
+ "github.com/cortexproject/cortex/pkg/querier"
 querier_stats "github.com/cortexproject/cortex/pkg/querier/stats"
 "github.com/cortexproject/cortex/pkg/querier/tenantfederation"
 "github.com/cortexproject/cortex/pkg/querier/tripperware"
@@ -30,6 +33,7 @@ import (
 util_api "github.com/cortexproject/cortex/pkg/util/api"
 "github.com/cortexproject/cortex/pkg/util/limiter"
 util_log "github.com/cortexproject/cortex/pkg/util/log"
+ "github.com/cortexproject/cortex/pkg/util/requestmeta"
 )

 const (
@@ -75,8 +79,6 @@ const (
 limitBytesStoreGateway = `exceeded bytes limit`
 )

-var noopResponseSizeLimiter = limiter.NewResponseSizeLimiter(0)
-
 // Config for a Handler.
 type HandlerConfig struct {
 LogQueriesLongerThan time.Duration `yaml:"log_queries_longer_than"`
@@ -247,11 +249,11 @@ func (f *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 }
 userID := tenant.JoinTenantIDs(tenantIDs)
+ source := tripperware.GetSource(r)

 if f.tenantFederationCfg.Enabled {
  maxTenant := f.tenantFederationCfg.MaxTenant
  if maxTenant > 0 && len(tenantIDs) > maxTenant {
-  source := tripperware.GetSource(r.Header.Get("User-Agent"))
   if f.cfg.QueryStatsEnabled {
    f.rejectedQueries.WithLabelValues(reasonTooManyTenants, source, userID).Inc()
   }
@@ -283,7 +285,8 @@ func (f *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 // We parse form here so that we can use buf as body, in order to
 // prevent https://github.com/cortexproject/cortex/issues/5201.
 // Exclude remote read here as we don't have to buffer its body.
- if !strings.Contains(r.URL.Path, "api/v1/read") {
+ isRemoteRead := strings.Contains(r.URL.Path, "api/v1/read")
+ if !isRemoteRead {
  if err := r.ParseForm(); err != nil {
   statusCode := http.StatusBadRequest
   if util.IsRequestBodyTooLarge(err) {
@@ -291,7 +294,6 @@ func (f *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
   }
   http.Error(w, err.Error(), statusCode)
   if f.cfg.QueryStatsEnabled && util.IsRequestBodyTooLarge(err) {
-   source := tripperware.GetSource(r.Header.Get("User-Agent"))
    f.rejectedQueries.WithLabelValues(reasonRequestBodySizeExceeded, source, userID).Inc()
   }
   return
@@ -299,9 +301,9 @@ func (f *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
  r.Body = io.NopCloser(&buf)
 }

- source := tripperware.GetSource(r.Header.Get("User-Agent"))
-
- // Log request
- if f.cfg.QueryStatsEnabled {
+ // Log request if the request is not remote read.
+ // We would need to parse the remote read proto to log it properly, so skip it.
+ if f.cfg.QueryStatsEnabled && !isRemoteRead {
  queryString = f.parseRequestQueryString(r, buf)
  f.logQueryRequest(r, queryString, source)
 }
@@ -332,7 +334,7 @@ func (f *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 // If the response status code is not 2xx, try to get the
 // error message from response body.
 if resp.StatusCode/100 != 2 {
- body, err2 := tripperware.BodyBytes(resp, noopResponseSizeLimiter, f.log)
+ body, err2 := tripperware.BodyBytes(resp, f.log)
  if err2 == nil {
   err = httpgrpc.Errorf(resp.StatusCode, "%s", string(body))
  }
@@ -353,9 +355,7 @@ func (f *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
  return
 }

- for h, vs := range resp.Header {
-  hs[h] = vs
- }
+ maps.Copy(hs, resp.Header)

 w.WriteHeader(resp.StatusCode)
 // log copy response body error so that we will know even though success response code returned
@@ -365,10 +365,10 @@ func (f *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 }
 }

-func formatGrafanaStatsFields(r *http.Request) []interface{} {
+func formatGrafanaStatsFields(r *http.Request) []any {
 // NOTE(GiedriusS): see https://github.com/grafana/grafana/pull/60301 for more info.
- fields := make([]interface{}, 0, 4)
+ fields := make([]any, 0, 4)
 if dashboardUID := r.Header.Get("X-Dashboard-Uid"); dashboardUID != "" {
  fields = append(fields, "X-Dashboard-Uid", dashboardUID)
 }
@@ -380,7 +380,7 @@
 // logQueryRequest logs query request before query execution.
 func (f *Handler) logQueryRequest(r *http.Request, queryString url.Values, source string) {
- logMessage := []interface{}{
+ logMessage := []any{
  "msg", "query request",
  "component", "query-frontend",
  "method", r.Method,
@@ -399,11 +399,19 @@ func (f *Handler) logQueryRequest(r *http.Request, queryString url.Values, sourc
  logMessage = append(logMessage, "user_agent", ua)
 }

+ if engineType := r.Header.Get(engine.TypeHeader); len(engineType) > 0 {
+  logMessage = append(logMessage, "engine_type", engineType)
+ }
+
+ if blockStoreType := r.Header.Get(querier.BlockStoreTypeHeader); len(blockStoreType) > 0 {
+  logMessage = append(logMessage, "block_store_type", blockStoreType)
+ }
+
 if acceptEncoding := r.Header.Get("Accept-Encoding"); len(acceptEncoding) > 0 {
  logMessage = append(logMessage, "accept_encoding", acceptEncoding)
 }

- shouldLog := source == tripperware.SourceAPI || (f.cfg.EnabledRulerQueryStatsLog && source == tripperware.SourceRuler)
+ shouldLog := source == requestmeta.SourceAPI || (f.cfg.EnabledRulerQueryStatsLog && source == requestmeta.SourceRuler)
 if shouldLog {
  logMessage = append(logMessage, formatQueryString(queryString)...)
  level.Info(util_log.WithContext(r.Context(), f.log)).Log(logMessage...)
@@ -412,7 +420,7 @@ func (f *Handler) logQueryRequest(r *http.Request, queryString url.Values, sourc

 // reportSlowQuery reports slow queries.
 func (f *Handler) reportSlowQuery(r *http.Request, queryString url.Values, queryResponseTime time.Duration) {
- logMessage := []interface{}{
+ logMessage := []any{
  "msg", "slow query detected",
  "method", r.Method,
  "host", r.Host,
@@ -466,7 +474,7 @@ func (f *Handler) reportQueryStats(r *http.Request, source, userID string, query
 }

 // Log stats.
- logMessage := append([]interface{}{
+ logMessage := append([]any{
  "msg", "query stats",
  "component", "query-frontend",
  "method", r.Method,
@@ -511,6 +519,12 @@ func (f *Handler) reportQueryStats(r *http.Request, source, userID string, query
 if ua := r.Header.Get("User-Agent"); len(ua) > 0 {
  logMessage = append(logMessage, "user_agent", ua)
 }
+ if engineType := r.Header.Get(engine.TypeHeader); len(engineType) > 0 {
+  logMessage = append(logMessage, "engine_type", engineType)
+ }
+ if blockStoreType := r.Header.Get(querier.BlockStoreTypeHeader); len(blockStoreType) > 0 {
+  logMessage = append(logMessage, "block_store_type", blockStoreType)
+ }
 if priority, ok := stats.LoadPriority(); ok {
  logMessage = append(logMessage, "priority", priority)
 }
@@ -533,7 +547,7 @@ func (f *Handler) reportQueryStats(r *http.Request, source, userID string, query
 }
 }

- shouldLog := source == tripperware.SourceAPI || (f.cfg.EnabledRulerQueryStatsLog && source == tripperware.SourceRuler)
+ shouldLog := source == requestmeta.SourceAPI || (f.cfg.EnabledRulerQueryStatsLog && source == requestmeta.SourceRuler)
 if shouldLog {
  logMessage = append(logMessage, formatQueryString(queryString)...)
  if error != nil {
@@ -571,7 +585,10 @@ func (f *Handler) reportQueryStats(r *http.Request, source, userID string, query
  reason = reasonChunksLimitStoreGateway
 } else if strings.Contains(errMsg, limitBytesStoreGateway) {
  reason = reasonBytesLimitStoreGateway
- } else if strings.Contains(errMsg, limiter.ErrResourceLimitReachedStr) {
+ }
+ } else if statusCode == http.StatusServiceUnavailable && error != nil {
+ errMsg := error.Error()
+ if strings.Contains(errMsg, limiter.ErrResourceLimitReachedStr) {
  reason = reasonResourceExhausted
 }
 }
@@ -595,12 +612,12 @@ func (f *Handler) parseRequestQueryString(r *http.Request, bodyBuf bytes.Buffer)
 return r.Form
 }

-func formatQueryString(queryString url.Values) (fields []interface{}) {
- var queryFields []interface{}
+func formatQueryString(queryString url.Values) (fields []any) {
+ var queryFields []any
 for k, v := range queryString {
  // If `query` or `match[]` field exists, we always put it as the last field.
  if k == "query" || k == "match[]" {
-  queryFields = []interface{}{fmt.Sprintf("param_%s", k), strings.Join(v, ",")}
+  queryFields = []any{fmt.Sprintf("param_%s", k), strings.Join(v, ",")}
   continue
  }
  fields = append(fields, fmt.Sprintf("param_%s", k), strings.Join(v, ","))
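handler.go (like the mock ingester earlier) swaps hand-written map copy loops for maps.Copy, available in the standard library since Go 1.21. The call inserts every key/value pair of the source map into the destination, overwriting keys that already exist; a minimal sketch, not taken from the patch:

package main

import (
	"fmt"
	"maps"
)

func main() {
	dst := map[string][]string{"Content-Type": {"application/json"}}
	src := map[string][]string{"X-Request-Id": {"abc"}, "Content-Type": {"text/plain"}}

	// Equivalent to: for k, v := range src { dst[k] = v }
	maps.Copy(dst, src)

	fmt.Println(dst["Content-Type"], dst["X-Request-Id"]) // [text/plain] [abc]
}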
diff --git a/pkg/frontend/transport/handler_test.go b/pkg/frontend/transport/handler_test.go
index aa86323029..b6c90a31fc 100644
--- a/pkg/frontend/transport/handler_test.go
+++ b/pkg/frontend/transport/handler_test.go
@@ -24,13 +24,15 @@ import (
 "github.com/weaveworks/common/user"
 "google.golang.org/grpc/codes"

+ "github.com/cortexproject/cortex/pkg/engine"
+ "github.com/cortexproject/cortex/pkg/querier"
 querier_stats "github.com/cortexproject/cortex/pkg/querier/stats"
 "github.com/cortexproject/cortex/pkg/querier/tenantfederation"
- "github.com/cortexproject/cortex/pkg/querier/tripperware"
 "github.com/cortexproject/cortex/pkg/tenant"
 util_api "github.com/cortexproject/cortex/pkg/util/api"
 "github.com/cortexproject/cortex/pkg/util/limiter"
 util_log "github.com/cortexproject/cortex/pkg/util/log"
+ "github.com/cortexproject/cortex/pkg/util/requestmeta"
 )

 type roundTripperFunc func(*http.Request) (*http.Response, error)
@@ -216,7 +218,7 @@ func TestHandler_ServeHTTP(t *testing.T) {
  }, nil
 }),
 additionalMetricsCheckFunc: func(h *Handler) {
- v := promtest.ToFloat64(h.rejectedQueries.WithLabelValues(reasonResponseBodySizeExceeded, tripperware.SourceAPI, userID))
+ v := promtest.ToFloat64(h.rejectedQueries.WithLabelValues(reasonResponseBodySizeExceeded, requestmeta.SourceAPI, userID))
  assert.Equal(t, float64(1), v)
 },
 expectedStatusCode: http.StatusRequestEntityTooLarge,
@@ -232,7 +234,7 @@ func TestHandler_ServeHTTP(t *testing.T) {
  }, nil
 }),
 additionalMetricsCheckFunc: func(h *Handler) {
- v := promtest.ToFloat64(h.rejectedQueries.WithLabelValues(reasonTooManyRequests, tripperware.SourceAPI, userID))
+ v := promtest.ToFloat64(h.rejectedQueries.WithLabelValues(reasonTooManyRequests, requestmeta.SourceAPI, userID))
  assert.Equal(t, float64(1), v)
 },
 expectedStatusCode: http.StatusTooManyRequests,
@@ -248,7 +250,7 @@ func TestHandler_ServeHTTP(t *testing.T) {
  }, nil
 }),
 additionalMetricsCheckFunc: func(h *Handler) {
- v := promtest.ToFloat64(h.rejectedQueries.WithLabelValues(reasonTooManySamples, tripperware.SourceAPI, userID))
+ v := promtest.ToFloat64(h.rejectedQueries.WithLabelValues(reasonTooManySamples, requestmeta.SourceAPI, userID))
  assert.Equal(t, float64(1), v)
 },
 expectedStatusCode: http.StatusUnprocessableEntity,
@@ -264,7 +266,7 @@ func TestHandler_ServeHTTP(t *testing.T) {
  }, nil
 }),
 additionalMetricsCheckFunc: func(h *Handler) {
- v := promtest.ToFloat64(h.rejectedQueries.WithLabelValues(reasonTimeRangeExceeded, tripperware.SourceAPI, userID))
+ v := promtest.ToFloat64(h.rejectedQueries.WithLabelValues(reasonTimeRangeExceeded, requestmeta.SourceAPI, userID))
  assert.Equal(t, float64(1), v)
 },
 expectedStatusCode: http.StatusUnprocessableEntity,
@@ -280,7 +282,7 @@ func TestHandler_ServeHTTP(t *testing.T) {
  }, nil
 }),
 additionalMetricsCheckFunc: func(h *Handler) {
- v := promtest.ToFloat64(h.rejectedQueries.WithLabelValues(reasonSeriesFetched, tripperware.SourceAPI, userID))
+ v := promtest.ToFloat64(h.rejectedQueries.WithLabelValues(reasonSeriesFetched, requestmeta.SourceAPI, userID))
  assert.Equal(t, float64(1), v)
 },
 expectedStatusCode: http.StatusUnprocessableEntity,
@@ -296,7 +298,7 @@ func TestHandler_ServeHTTP(t *testing.T) {
  }, nil
 }),
 additionalMetricsCheckFunc: func(h *Handler) {
- v := promtest.ToFloat64(h.rejectedQueries.WithLabelValues(reasonChunksFetched, tripperware.SourceAPI, userID))
+ v := promtest.ToFloat64(h.rejectedQueries.WithLabelValues(reasonChunksFetched, requestmeta.SourceAPI, userID))
  assert.Equal(t, float64(1), v)
 },
 expectedStatusCode: http.StatusUnprocessableEntity,
@@ -312,7 +314,7 @@ func TestHandler_ServeHTTP(t *testing.T) {
  }, nil
 }),
 additionalMetricsCheckFunc: func(h *Handler) {
- v := promtest.ToFloat64(h.rejectedQueries.WithLabelValues(reasonChunkBytesFetched, tripperware.SourceAPI, userID))
+ v := promtest.ToFloat64(h.rejectedQueries.WithLabelValues(reasonChunkBytesFetched, requestmeta.SourceAPI, userID))
  assert.Equal(t, float64(1), v)
 },
 expectedStatusCode: http.StatusUnprocessableEntity,
@@ -328,7 +330,7 @@ func TestHandler_ServeHTTP(t *testing.T) {
  }, nil
 }),
 additionalMetricsCheckFunc: func(h *Handler) {
- v := promtest.ToFloat64(h.rejectedQueries.WithLabelValues(reasonDataBytesFetched, tripperware.SourceAPI, userID))
+ v := promtest.ToFloat64(h.rejectedQueries.WithLabelValues(reasonDataBytesFetched, requestmeta.SourceAPI, userID))
  assert.Equal(t, float64(1), v)
 },
 expectedStatusCode: http.StatusUnprocessableEntity,
@@ -344,7 +346,7 @@ func TestHandler_ServeHTTP(t *testing.T) {
  }, nil
 }),
 additionalMetricsCheckFunc: func(h *Handler) {
- v := promtest.ToFloat64(h.rejectedQueries.WithLabelValues(reasonSeriesLimitStoreGateway, tripperware.SourceAPI, userID))
+ v := promtest.ToFloat64(h.rejectedQueries.WithLabelValues(reasonSeriesLimitStoreGateway, requestmeta.SourceAPI, userID))
  assert.Equal(t, float64(1), v)
 },
 expectedStatusCode: http.StatusUnprocessableEntity,
@@ -360,7 +362,7 @@ func TestHandler_ServeHTTP(t *testing.T) {
  }, nil
 }),
 additionalMetricsCheckFunc: func(h *Handler) {
- v := promtest.ToFloat64(h.rejectedQueries.WithLabelValues(reasonChunksLimitStoreGateway, tripperware.SourceAPI, userID))
+ v := promtest.ToFloat64(h.rejectedQueries.WithLabelValues(reasonChunksLimitStoreGateway, requestmeta.SourceAPI, userID))
  assert.Equal(t, float64(1), v)
 },
 expectedStatusCode: http.StatusUnprocessableEntity,
@@ -376,7 +378,7 @@ func TestHandler_ServeHTTP(t *testing.T) {
  }, nil
 }),
 additionalMetricsCheckFunc: func(h *Handler) {
- v := promtest.ToFloat64(h.rejectedQueries.WithLabelValues(reasonBytesLimitStoreGateway, tripperware.SourceAPI, userID))
+ v := promtest.ToFloat64(h.rejectedQueries.WithLabelValues(reasonBytesLimitStoreGateway, requestmeta.SourceAPI, userID))
  assert.Equal(t, float64(1), v)
 },
 expectedStatusCode: http.StatusUnprocessableEntity,
@@ -386,17 +388,17 @@ func TestHandler_ServeHTTP(t *testing.T) {
 cfg: HandlerConfig{QueryStatsEnabled: true},
 expectedMetrics: 6,
 roundTripperFunc: roundTripperFunc(func(req *http.Request) (*http.Response, error) {
- resourceLimitReachedErr := &limiter.ResourceLimitReachedError{}
+ resourceLimitReachedErr := limiter.ErrResourceLimitReached
  return &http.Response{
-  StatusCode: http.StatusUnprocessableEntity,
+  StatusCode: http.StatusServiceUnavailable,
   Body: io.NopCloser(strings.NewReader(resourceLimitReachedErr.Error())),
  }, nil
 }),
 additionalMetricsCheckFunc: func(h *Handler) {
- v := promtest.ToFloat64(h.rejectedQueries.WithLabelValues(reasonResourceExhausted, tripperware.SourceAPI, userID))
+ v := promtest.ToFloat64(h.rejectedQueries.WithLabelValues(reasonResourceExhausted, requestmeta.SourceAPI, userID))
  assert.Equal(t, float64(1), v)
 },
- expectedStatusCode: http.StatusUnprocessableEntity,
+ expectedStatusCode: http.StatusServiceUnavailable,
 },
 {
 name: "test cortex_slow_queries_total",
@@ -410,7 +412,7 @@ func TestHandler_ServeHTTP(t *testing.T) {
  }, nil
 }),
 additionalMetricsCheckFunc: func(h *Handler) {
- v := promtest.ToFloat64(h.slowQueries.WithLabelValues(tripperware.SourceAPI, userID))
+ v := promtest.ToFloat64(h.slowQueries.WithLabelValues(requestmeta.SourceAPI, userID))
  assert.Equal(t, float64(1), v)
 },
 expectedStatusCode: http.StatusOK,
@@ -472,12 +474,12 @@ func TestReportQueryStatsFormat(t *testing.T) {
 tests := map[string]testCase{
 "should not include query and header details if empty": {
  expectedLog: `level=info msg="query stats" component=query-frontend method=GET path=/prometheus/api/v1/query response_time=1s query_wall_time_seconds=0 response_series_count=0 fetched_series_count=0 fetched_chunks_count=0 fetched_samples_count=0 fetched_chunks_bytes=0 fetched_data_bytes=0 split_queries=0 status_code=200 response_size=1000 samples_scanned=0`,
- source: tripperware.SourceAPI,
+ source: requestmeta.SourceAPI,
 },
 "should include query length and string at the end": {
  queryString: url.Values(map[string][]string{"query": {"up"}}),
  expectedLog: `level=info msg="query stats" component=query-frontend method=GET path=/prometheus/api/v1/query response_time=1s query_wall_time_seconds=0 response_series_count=0 fetched_series_count=0 fetched_chunks_count=0 fetched_samples_count=0 fetched_chunks_bytes=0 fetched_data_bytes=0 split_queries=0 status_code=200 response_size=1000 samples_scanned=0 query_length=2 param_query=up`,
- source: tripperware.SourceAPI,
+ source: requestmeta.SourceAPI,
 },
 "should include query stats": {
  queryStats: &querier_stats.QueryStats{
@@ -494,17 +496,27 @@ func TestReportQueryStatsFormat(t *testing.T) {
   },
  },
  expectedLog: `level=info msg="query stats" component=query-frontend method=GET path=/prometheus/api/v1/query response_time=1s query_wall_time_seconds=3 response_series_count=100 fetched_series_count=100 fetched_chunks_count=200 fetched_samples_count=300 fetched_chunks_bytes=1024 fetched_data_bytes=2048 split_queries=10 status_code=200 response_size=1000 samples_scanned=0 query_storage_wall_time_seconds=6000`,
- source: tripperware.SourceAPI,
+ source: requestmeta.SourceAPI,
 },
 "should include user agent": {
  header: http.Header{"User-Agent": []string{"Grafana"}},
  expectedLog: `level=info msg="query stats" component=query-frontend method=GET path=/prometheus/api/v1/query response_time=1s query_wall_time_seconds=0 response_series_count=0 fetched_series_count=0 fetched_chunks_count=0 fetched_samples_count=0 fetched_chunks_bytes=0 fetched_data_bytes=0 split_queries=0 status_code=200 response_size=1000 samples_scanned=0 user_agent=Grafana`,
- source: tripperware.SourceAPI,
+ source: requestmeta.SourceAPI,
+ },
+ "should include engine type": {
+ header: http.Header{http.CanonicalHeaderKey(engine.TypeHeader): []string{string(engine.Thanos)}},
+ expectedLog: `level=info msg="query stats" component=query-frontend method=GET path=/prometheus/api/v1/query response_time=1s query_wall_time_seconds=0 response_series_count=0 fetched_series_count=0 fetched_chunks_count=0 fetched_samples_count=0 fetched_chunks_bytes=0 fetched_data_bytes=0 split_queries=0 status_code=200 response_size=1000 samples_scanned=0 engine_type=thanos`,
+ source: requestmeta.SourceAPI,
+ },
+ "should include block store type": {
+ header: http.Header{http.CanonicalHeaderKey(querier.BlockStoreTypeHeader): []string{"parquet"}},
+ expectedLog: `level=info msg="query stats" component=query-frontend method=GET path=/prometheus/api/v1/query response_time=1s query_wall_time_seconds=0 response_series_count=0 fetched_series_count=0 fetched_chunks_count=0 fetched_samples_count=0 fetched_chunks_bytes=0 fetched_data_bytes=0 split_queries=0 status_code=200 response_size=1000 samples_scanned=0 block_store_type=parquet`,
+ source: requestmeta.SourceAPI,
 },
 "should include response error": {
  responseErr: errors.New("foo_err"),
  expectedLog: `level=error msg="query stats" component=query-frontend method=GET path=/prometheus/api/v1/query response_time=1s query_wall_time_seconds=0 response_series_count=0 fetched_series_count=0 fetched_chunks_count=0 fetched_samples_count=0 fetched_chunks_bytes=0 fetched_data_bytes=0 split_queries=0 status_code=200 response_size=1000 samples_scanned=0 error=foo_err`,
- source: tripperware.SourceAPI,
+ source: requestmeta.SourceAPI,
 },
 "should include query priority": {
  queryString: url.Values(map[string][]string{"query": {"up"}}),
@@ -513,7 +525,7 @@ func TestReportQueryStatsFormat(t *testing.T) {
   PriorityAssigned: true,
  },
  expectedLog: `level=info msg="query stats" component=query-frontend method=GET path=/prometheus/api/v1/query response_time=1s query_wall_time_seconds=0 response_series_count=0 fetched_series_count=0 fetched_chunks_count=0 fetched_samples_count=0 fetched_chunks_bytes=0 fetched_data_bytes=0 split_queries=0 status_code=200 response_size=1000 samples_scanned=0 query_length=2 priority=99 param_query=up`,
- source: tripperware.SourceAPI,
+ source: requestmeta.SourceAPI,
 },
 "should include data fetch min and max time": {
  queryString: url.Values(map[string][]string{"query": {"up"}}),
@@ -522,7 +534,7 @@ func TestReportQueryStatsFormat(t *testing.T) {
   DataSelectMinTime: 1704067200000,
  },
  expectedLog: `level=info msg="query stats" component=query-frontend method=GET path=/prometheus/api/v1/query response_time=1s query_wall_time_seconds=0 response_series_count=0 fetched_series_count=0 fetched_chunks_count=0 fetched_samples_count=0 fetched_chunks_bytes=0 fetched_data_bytes=0 split_queries=0 status_code=200 response_size=1000 samples_scanned=0 data_select_max_time=1704153600 data_select_min_time=1704067200 query_length=2 param_query=up`,
- source: tripperware.SourceAPI,
+ source: requestmeta.SourceAPI,
 },
 "should include query stats with store gateway stats": {
  queryStats: &querier_stats.QueryStats{
@@ -541,16 +553,16 @@ func TestReportQueryStatsFormat(t *testing.T) {
   },
  },
  expectedLog: `level=info msg="query stats" component=query-frontend method=GET path=/prometheus/api/v1/query response_time=1s query_wall_time_seconds=3 response_series_count=100 fetched_series_count=100 fetched_chunks_count=200 fetched_samples_count=300 fetched_chunks_bytes=1024 fetched_data_bytes=2048 split_queries=10 status_code=200 response_size=1000 samples_scanned=0 store_gateway_touched_postings_count=20 store_gateway_touched_posting_bytes=200 query_storage_wall_time_seconds=6000`,
- source: tripperware.SourceAPI,
+ source: requestmeta.SourceAPI,
 },
 "should not report a log": {
  expectedLog: ``,
- source: tripperware.SourceRuler,
+ source: requestmeta.SourceRuler,
  enabledRulerQueryStatsLog: false,
 },
 "should report a log": {
  expectedLog: `level=info msg="query stats" component=query-frontend method=GET path=/prometheus/api/v1/query response_time=1s query_wall_time_seconds=0 response_series_count=0 fetched_series_count=0 fetched_chunks_count=0 fetched_samples_count=0 fetched_chunks_bytes=0 fetched_data_bytes=0 split_queries=0 status_code=200 response_size=1000 samples_scanned=0`,
- source: tripperware.SourceRuler,
+ source: requestmeta.SourceRuler,
  enabledRulerQueryStatsLog: true,
 },
 }
@@ -559,6 +571,7 @@ func TestReportQueryStatsFormat(t *testing.T) {
 t.Run(testName, func(t *testing.T) {
  handler := NewHandler(HandlerConfig{QueryStatsEnabled: true, EnabledRulerQueryStatsLog: testData.enabledRulerQueryStatsLog}, tenantfederation.Config{}, http.DefaultTransport, logger, nil)
  req.Header = testData.header
+ req = req.WithContext(requestmeta.ContextWithRequestSource(context.Background(), testData.source))
  handler.reportQueryStats(req, testData.source, userID, testData.queryString, responseTime, testData.queryStats, testData.responseErr, statusCode, resp)
  data, err := io.ReadAll(outputBuf)
  require.NoError(t, err)
@@ -706,7 +719,7 @@ func Test_TenantFederation_MaxTenant(t *testing.T) {
 require.Contains(t, string(body), test.expectedErrMsg)

 if strings.Contains(test.expectedErrMsg, "too many tenants") {
- v := promtest.ToFloat64(handler.rejectedQueries.WithLabelValues(reasonTooManyTenants, tripperware.SourceAPI, test.orgId))
+ v := promtest.ToFloat64(handler.rejectedQueries.WithLabelValues(reasonTooManyTenants, requestmeta.SourceAPI, test.orgId))
  assert.Equal(t, float64(1), v)
 }
 }
@@ -828,3 +841,41 @@ func TestHandlerMetricsCleanup(t *testing.T) {
 "cortex_query_samples_scanned_total", "cortex_query_peak_samples", "cortex_query_fetched_chunks_bytes_total",
 "cortex_query_fetched_data_bytes_total", "cortex_rejected_queries_total", "cortex_slow_queries_total"))
 }
+
+func TestHandler_RemoteReadRequest_DoesNotParseQueryString(t *testing.T) {
+ // Create a mock round tripper that captures the request
+ var capturedRequest *http.Request
+ roundTripper := roundTripperFunc(func(req *http.Request) (*http.Response, error) {
+  capturedRequest = req
+  return &http.Response{
+   StatusCode: http.StatusOK,
+   Body: io.NopCloser(strings.NewReader("{}")),
+  }, nil
+ })
+
+ // Use a larger MaxBodySize to avoid the "request body too large" error
+ handler := NewHandler(HandlerConfig{QueryStatsEnabled: true, MaxBodySize: 10 * 1024 * 1024}, tenantfederation.Config{}, roundTripper, log.NewNopLogger(), nil)
+ handlerWithAuth := middleware.Merge(middleware.AuthenticateUser).Wrap(handler)
+
+ // Create a remote read request with a body that would be corrupted by parseRequestQueryString
+ originalBody := "snappy-compressed-data"
+ req := httptest.NewRequest("POST", "http://fake/api/v1/read", strings.NewReader(originalBody))
+ req.Header.Set("X-Scope-OrgId", "user-1")
+ req.Header.Set("Content-Type", "application/x-protobuf")
+ req.Header.Set("Content-Encoding", "snappy")
+
+ resp := httptest.NewRecorder()
+ handlerWithAuth.ServeHTTP(resp, req)
+
+ // Verify the request was successful
+ require.Equal(t, http.StatusOK, resp.Code)
+
+ // Verify that the original request body was preserved and not corrupted
+ require.NotNil(t, capturedRequest)
+ bodyBytes, err := io.ReadAll(capturedRequest.Body)
+ require.NoError(t, err)
+ require.Equal(t, originalBody, string(bodyBytes))
+
+ // Verify that the request body is still readable (not replaced with empty buffer)
+ require.NotEmpty(t, string(bodyBytes))
+}
diff --git a/pkg/frontend/v1/queue_test.go b/pkg/frontend/v1/queue_test.go
index a11cfe1513..35d5f2010e 100644
--- a/pkg/frontend/v1/queue_test.go
+++ b/pkg/frontend/v1/queue_test.go
@@ -61,7 +61,7 @@ func TestDequeuesExpiredRequests(t *testing.T) {
 cancel()

 good := 0
- for i := 0; i < 10; i++ {
+ for i := range 10 {
  var err error
  if i%5 == 0 {
   good++
@@ -101,7 +101,7 @@ func TestRoundRobinQueues(t *testing.T) {
 f, err := setupFrontend(t, requests, config)
 require.NoError(t, err)

- for i := 0; i < requests; i++ {
+ for i := range requests {
  userID := fmt.Sprint(i / tenants)
  ctx := user.InjectOrgID(context.Background(), userID)

@@ -167,5 +167,5 @@ func (p *processServerMock) SetHeader(_ metadata.MD) error { return nil }
 func (p *processServerMock) SendHeader(_ metadata.MD) error { return nil }
 func (p *processServerMock) SetTrailer(md metadata.MD) {}
 func (p *processServerMock) Context() context.Context { return p.ctx }
-func (p *processServerMock) SendMsg(m interface{}) error { return nil }
-func (p *processServerMock) RecvMsg(m interface{}) error { return nil }
+func (p *processServerMock) SendMsg(m any) error { return nil }
+func (p *processServerMock) RecvMsg(m any) error { return nil }
diff --git a/pkg/frontend/v2/frontend_test.go b/pkg/frontend/v2/frontend_test.go
index 676070ca0f..5ba83213d8 100644
--- a/pkg/frontend/v2/frontend_test.go
+++ b/pkg/frontend/v2/frontend_test.go
@@ -72,7 +72,7 @@ func setupFrontend(t *testing.T, schedulerReplyFunc func(f *Frontend, msg *sched
 })

 // Wait for frontend to connect to scheduler.
- test.Poll(t, 1*time.Second, 1, func() interface{} {
+ test.Poll(t, 1*time.Second, 1, func() any {
  ms.mu.Lock()
  defer ms.mu.Unlock()

@@ -206,7 +206,7 @@ func TestFrontendCancellation(t *testing.T) {
 require.Nil(t, resp)

 // We wait a bit to make sure scheduler receives the cancellation request.
- test.Poll(t, time.Second, 2, func() interface{} {
+ test.Poll(t, time.Second, 2, func() any {
  ms.mu.Lock()
  defer ms.mu.Unlock()
- c.client.WatchPrefix(ctx, "", func(key string, value interface{}) bool { + c.client.WatchPrefix(ctx, "", func(key string, value any) bool { replica := value.(*ReplicaDesc) user, cluster, keyHasSeparator := strings.Cut(key, "/") @@ -383,7 +382,7 @@ func (c *HATracker) cleanupOldReplicas(ctx context.Context, deadline time.Time) // Not marked as deleted yet. if desc.DeletedAt == 0 && timestamp.Time(desc.ReceivedAt).Before(deadline) { - err := c.client.CAS(ctx, key, func(in interface{}) (out interface{}, retry bool, err error) { + err := c.client.CAS(ctx, key, func(in any) (out any, retry bool, err error) { d, ok := in.(*ReplicaDesc) if !ok || d == nil || d.DeletedAt > 0 || !timestamp.Time(desc.ReceivedAt).Before(deadline) { return nil, false, nil @@ -452,7 +451,7 @@ func (c *HATracker) CheckReplica(ctx context.Context, userID, replicaGroup, repl } func (c *HATracker) checkKVStore(ctx context.Context, key, replica string, now time.Time) error { - return c.client.CAS(ctx, key, func(in interface{}) (out interface{}, retry bool, err error) { + return c.client.CAS(ctx, key, func(in any) (out any, retry bool, err error) { if desc, ok := in.(*ReplicaDesc); ok && desc.DeletedAt == 0 { // We don't need to CAS and update the timestamp in the KV store if the timestamp we've received // this sample at is less than updateTimeout amount of time since the timestamp in the KV store. diff --git a/pkg/ha/ha_tracker_test.go b/pkg/ha/ha_tracker_test.go index 563d790793..3d576082aa 100644 --- a/pkg/ha/ha_tracker_test.go +++ b/pkg/ha/ha_tracker_test.go @@ -39,7 +39,7 @@ func checkReplicaTimestamp(t *testing.T, duration time.Duration, c *HATracker, u // to match "received at" precision expected = expected.Truncate(time.Millisecond) - test.Poll(t, duration, nil, func() interface{} { + test.Poll(t, duration, nil, func() any { c.electedLock.RLock() r := c.elected[key] c.electedLock.RUnlock() @@ -120,7 +120,6 @@ func TestHATrackerConfig_Validate(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() assert.Equal(t, testData.expectedErr, testData.cfg.Validate()) @@ -455,7 +454,6 @@ func TestCheckReplicaUpdateTimeoutJitter(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() // Init HA tracker @@ -573,7 +571,7 @@ func TestHAClustersLimit(t *testing.T) { func waitForClustersUpdate(t *testing.T, expected int, tr *HATracker, userID string) { t.Helper() - test.Poll(t, 2*time.Second, expected, func() interface{} { + test.Poll(t, 2*time.Second, expected, func() any { tr.electedLock.RLock() defer tr.electedLock.RUnlock() @@ -762,7 +760,7 @@ func TestCheckReplicaCleanup(t *testing.T) { func checkUserReplicaGroups(t *testing.T, duration time.Duration, c *HATracker, user string, expectedReplicaGroups int) { t.Helper() - test.Poll(t, duration, nil, func() interface{} { + test.Poll(t, duration, nil, func() any { c.electedLock.RLock() cl := len(c.replicaGroups[user]) c.electedLock.RUnlock() @@ -778,7 +776,7 @@ func checkUserReplicaGroups(t *testing.T, duration time.Duration, c *HATracker, func checkReplicaDeletionState(t *testing.T, duration time.Duration, c *HATracker, user, replicaGroup string, expectedExistsInMemory, expectedExistsInKV, expectedMarkedForDeletion bool) { key := fmt.Sprintf("%s/%s", user, replicaGroup) - test.Poll(t, duration, nil, func() interface{} { + test.Poll(t, duration, nil, func() any { c.electedLock.RLock() _, exists := c.elected[key] 
c.electedLock.RUnlock() diff --git a/pkg/ingester/active_series.go b/pkg/ingester/active_series.go index 1c3bf4c6d8..57134a03ca 100644 --- a/pkg/ingester/active_series.go +++ b/pkg/ingester/active_series.go @@ -42,7 +42,7 @@ func NewActiveSeries() *ActiveSeries { c := &ActiveSeries{} // Stripes are pre-allocated so that we only read on them and no lock is required. - for i := 0; i < numActiveSeriesStripes; i++ { + for i := range numActiveSeriesStripes { c.stripes[i].refs = map[uint64][]activeSeriesEntry{} } @@ -59,21 +59,21 @@ func (c *ActiveSeries) UpdateSeries(series labels.Labels, hash uint64, now time. // Purge removes expired entries from the cache. This function should be called // periodically to avoid memory leaks. func (c *ActiveSeries) Purge(keepUntil time.Time) { - for s := 0; s < numActiveSeriesStripes; s++ { + for s := range numActiveSeriesStripes { c.stripes[s].purge(keepUntil) } } // nolint // Linter reports that this method is unused, but it is. func (c *ActiveSeries) clear() { - for s := 0; s < numActiveSeriesStripes; s++ { + for s := range numActiveSeriesStripes { c.stripes[s].clear() } } func (c *ActiveSeries) Active() int { total := 0 - for s := 0; s < numActiveSeriesStripes; s++ { + for s := range numActiveSeriesStripes { total += c.stripes[s].getActive() } return total @@ -81,7 +81,7 @@ func (c *ActiveSeries) Active() int { func (c *ActiveSeries) ActiveNativeHistogram() int { total := 0 - for s := 0; s < numActiveSeriesStripes; s++ { + for s := range numActiveSeriesStripes { total += c.stripes[s].getActiveNativeHistogram() } return total diff --git a/pkg/ingester/active_series_test.go b/pkg/ingester/active_series_test.go index 3d84d7570c..49a24c8936 100644 --- a/pkg/ingester/active_series_test.go +++ b/pkg/ingester/active_series_test.go @@ -29,15 +29,15 @@ func TestActiveSeries_UpdateSeries(t *testing.T) { assert.Equal(t, 0, c.ActiveNativeHistogram()) labels1Hash := fromLabelToLabels(ls1).Hash() labels2Hash := fromLabelToLabels(ls2).Hash() - c.UpdateSeries(ls1, labels1Hash, time.Now(), true, copyFn) + c.UpdateSeries(fromLabelToLabels(ls1), labels1Hash, time.Now(), true, copyFn) assert.Equal(t, 1, c.Active()) assert.Equal(t, 1, c.ActiveNativeHistogram()) - c.UpdateSeries(ls1, labels1Hash, time.Now(), true, copyFn) + c.UpdateSeries(fromLabelToLabels(ls1), labels1Hash, time.Now(), true, copyFn) assert.Equal(t, 1, c.Active()) assert.Equal(t, 1, c.ActiveNativeHistogram()) - c.UpdateSeries(ls2, labels2Hash, time.Now(), true, copyFn) + c.UpdateSeries(fromLabelToLabels(ls2), labels2Hash, time.Now(), true, copyFn) assert.Equal(t, 2, c.Active()) assert.Equal(t, 2, c.ActiveNativeHistogram()) } @@ -52,11 +52,11 @@ func TestActiveSeries_Purge(t *testing.T) { } // Run the same test for increasing TTL values - for ttl := 0; ttl < len(series); ttl++ { + for ttl := range series { c := NewActiveSeries() - for i := 0; i < len(series); i++ { - c.UpdateSeries(series[i], fromLabelToLabels(series[i]).Hash(), time.Unix(int64(i), 0), true, copyFn) + for i := range series { + c.UpdateSeries(fromLabelToLabels(series[i]), fromLabelToLabels(series[i]).Hash(), time.Unix(int64(i), 0), true, copyFn) } c.Purge(time.Unix(int64(ttl+1), 0)) @@ -109,9 +109,7 @@ func BenchmarkActiveSeriesTest_single_series(b *testing.B) { } func benchmarkActiveSeriesConcurrencySingleSeries(b *testing.B, goroutines int) { - series := labels.Labels{ - {Name: "a", Value: "a"}, - } + series := labels.FromStrings("a", "a") c := NewActiveSeries() @@ -119,7 +117,7 @@ func benchmarkActiveSeriesConcurrencySingleSeries(b 
*testing.B, goroutines int) start := make(chan struct{}) max := int(math.Ceil(float64(b.N) / float64(goroutines))) labelhash := series.Hash() - for i := 0; i < goroutines; i++ { + for range goroutines { wg.Add(1) go func() { defer wg.Done() @@ -127,7 +125,7 @@ func benchmarkActiveSeriesConcurrencySingleSeries(b *testing.B, goroutines int) now := time.Now() - for ix := 0; ix < max; ix++ { + for ix := range max { now = now.Add(time.Duration(ix) * time.Millisecond) c.UpdateSeries(series, labelhash, now, false, copyFn) } @@ -144,22 +142,21 @@ func BenchmarkActiveSeries_UpdateSeries(b *testing.B) { // Prepare series nameBuf := bytes.Buffer{} - for i := 0; i < 50; i++ { + for range 50 { nameBuf.WriteString("abcdefghijklmnopqrstuvzyx") } name := nameBuf.String() series := make([]labels.Labels, b.N) labelhash := make([]uint64, b.N) - for s := 0; s < b.N; s++ { - series[s] = labels.Labels{{Name: name, Value: name + strconv.Itoa(s)}} + for s := 0; b.Loop(); s++ { + series[s] = labels.FromStrings(name, name+strconv.Itoa(s)) labelhash[s] = series[s].Hash() } now := time.Now().UnixNano() - b.ResetTimer() - for ix := 0; ix < b.N; ix++ { + for ix := 0; b.Loop(); ix++ { c.UpdateSeries(series[ix], labelhash[ix], time.Unix(0, now+int64(ix)), false, copyFn) } } @@ -181,12 +178,12 @@ func benchmarkPurge(b *testing.B, twice bool) { series := [numSeries]labels.Labels{} labelhash := [numSeries]uint64{} - for s := 0; s < numSeries; s++ { - series[s] = labels.Labels{{Name: "a", Value: strconv.Itoa(s)}} + for s := range numSeries { + series[s] = labels.FromStrings("a", strconv.Itoa(s)) labelhash[s] = series[s].Hash() } - for i := 0; i < b.N; i++ { + for b.Loop() { b.StopTimer() // Prepare series diff --git a/pkg/ingester/client/client.go b/pkg/ingester/client/client.go index b52ac69634..ed8bacd45a 100644 --- a/pkg/ingester/client/client.go +++ b/pkg/ingester/client/client.go @@ -205,7 +205,7 @@ func (c *closableHealthAndIngesterClient) Run(streamPushChan chan *streamWriteJo var workerErr error var wg sync.WaitGroup - for i := 0; i < INGESTER_CLIENT_STREAM_WORKER_COUNT; i++ { + for i := range INGESTER_CLIENT_STREAM_WORKER_COUNT { workerName := fmt.Sprintf("ingester-%s-stream-push-worker-%d", c.addr, i) wg.Add(1) go func() { diff --git a/pkg/ingester/client/client_test.go b/pkg/ingester/client/client_test.go index da41b03636..02edc8d070 100644 --- a/pkg/ingester/client/client_test.go +++ b/pkg/ingester/client/client_test.go @@ -22,7 +22,7 @@ func TestMarshall(t *testing.T) { recorder := httptest.NewRecorder() { req := cortexpb.WriteRequest{} - for i := 0; i < numSeries; i++ { + for i := range numSeries { req.Timeseries = append(req.Timeseries, cortexpb.PreallocTimeseries{ TimeSeries: &cortexpb.TimeSeries{ Labels: []cortexpb.LabelAdapter{ diff --git a/pkg/ingester/client/compat_test.go b/pkg/ingester/client/compat_test.go index 9914af6d06..8c90d58560 100644 --- a/pkg/ingester/client/compat_test.go +++ b/pkg/ingester/client/compat_test.go @@ -63,7 +63,7 @@ func matchersEqual(expected, actual []*labels.Matcher) bool { return false } - for i := 0; i < len(expected); i++ { + for i := range expected { a := actual[i] e := expected[i] if a.Name != e.Name || a.Value != e.Value || a.Type != e.Type { @@ -85,8 +85,8 @@ func benchmarkSeriesMap(numSeries int, b *testing.B) { sm := make(map[string]int, numSeries) b.ReportAllocs() - b.ResetTimer() - for n := 0; n < b.N; n++ { + + for b.Loop() { for i, s := range series { sm[LabelsToKeyString(s)] = i } @@ -106,7 +106,7 @@ func benchmarkSeriesMap(numSeries int, b *testing.B) { func 
makeSeries(n int) []labels.Labels { series := make([]labels.Labels, 0, n) - for i := 0; i < n; i++ { + for i := range n { series = append(series, labels.FromMap(map[string]string{ "label0": "value0", "label1": "value1", diff --git a/pkg/ingester/client/cortex_util.go b/pkg/ingester/client/cortex_util.go index b3ba0e2d2b..5d463d49a7 100644 --- a/pkg/ingester/client/cortex_util.go +++ b/pkg/ingester/client/cortex_util.go @@ -32,10 +32,7 @@ func SendLabelNamesStream(s Ingester_LabelNamesStreamServer, l *LabelNamesStream func SendAsBatchToStream(totalItems int, streamBatchSize int, fn func(start, end int) error) error { for i := 0; i < totalItems; i += streamBatchSize { - j := i + streamBatchSize - if j > totalItems { - j = totalItems - } + j := min(i+streamBatchSize, totalItems) if err := fn(i, j); err != nil { return err } diff --git a/pkg/ingester/client/cortex_util_test.go b/pkg/ingester/client/cortex_util_test.go index 3058026ebe..3f1e02ddbc 100644 --- a/pkg/ingester/client/cortex_util_test.go +++ b/pkg/ingester/client/cortex_util_test.go @@ -117,7 +117,7 @@ func TestStreamingSends(t *testing.T) { clientCancel() // Wait until the cancelling has been propagated to the server. - test.Poll(t, time.Second, context.Canceled, func() interface{} { + test.Poll(t, time.Second, context.Canceled, func() any { return stream.Context().Err() }) diff --git a/pkg/ingester/errors.go b/pkg/ingester/errors.go index b982f6ce09..7da2f51b73 100644 --- a/pkg/ingester/errors.go +++ b/pkg/ingester/errors.go @@ -35,7 +35,7 @@ func (e *validationError) Error() string { if e.err == nil { return e.errorType } - if e.labels == nil { + if e.labels.IsEmpty() { return e.err.Error() } return fmt.Sprintf("%s for series %s", e.err.Error(), e.labels.String()) diff --git a/pkg/ingester/http_admin.go b/pkg/ingester/http_admin.go index 084e132db4..a5543c7d02 100644 --- a/pkg/ingester/http_admin.go +++ b/pkg/ingester/http_admin.go @@ -25,6 +25,8 @@ const tpl = ` {{if (gt .ReplicationFactor 0)}}

<p><b>NB stats do not account for replication factor, which is currently set to {{ .ReplicationFactor }}</b></p>
{{end}}
+ <p>These stats were aggregated from {{ .QueriedIngesterNum }} ingesters.</p>
+
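The http_admin.go changes below thread a new QueriedIngesterNum field through the render data so the stats page can report how many ingesters were consulted. A small sketch of how Go's html/template consumes such a field; the struct and template text are simplified stand-ins for the real tpl:

```go
package main

import (
	"html/template"
	"os"
)

// Simplified stand-in for the handler's render data; only the fields
// exercised by this snippet are included.
type statsPage struct {
	ReplicationFactor  int
	QueriedIngesterNum int
}

const tpl = `{{if (gt .ReplicationFactor 0)}}
<p><b>NB stats do not account for replication factor, which is currently set to {{ .ReplicationFactor }}</b></p>
{{end}}
<p>These stats were aggregated from {{ .QueriedIngesterNum }} ingesters.</p>
`

func main() {
	t := template.Must(template.New("stats").Parse(tpl))
	// With ReplicationFactor > 0 both paragraphs are rendered.
	_ = t.Execute(os.Stdout, statsPage{ReplicationFactor: 3, QueriedIngesterNum: 3})
}
```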
@@ -37,6 +39,7 @@ const tpl = ` + @@ -49,6 +52,7 @@ const tpl = ` + {{ end }} @@ -87,10 +91,11 @@ type UserStats struct { RuleIngestionRate float64 `json:"RuleIngestionRate"` ActiveSeries uint64 `json:"activeSeries"` LoadedBlocks uint64 `json:"loadedBlocks"` + QueriedIngesters uint64 `json:"queriedIngesters"` } // AllUserStatsRender render data for all users or return in json format. -func AllUserStatsRender(w http.ResponseWriter, r *http.Request, stats []UserIDStats, rf int) { +func AllUserStatsRender(w http.ResponseWriter, r *http.Request, stats []UserIDStats, rf, queriedIngesterNum int) { sort.Sort(UserStatsByTimeseries(stats)) if encodings, found := r.Header["Accept"]; found && @@ -102,12 +107,14 @@ func AllUserStatsRender(w http.ResponseWriter, r *http.Request, stats []UserIDSt } util.RenderHTTPResponse(w, struct { - Now time.Time `json:"now"` - Stats []UserIDStats `json:"stats"` - ReplicationFactor int `json:"replicationFactor"` + Now time.Time `json:"now"` + Stats []UserIDStats `json:"stats"` + ReplicationFactor int `json:"replicationFactor"` + QueriedIngesterNum int `json:"queriedIngesterNum"` }{ - Now: time.Now(), - Stats: stats, - ReplicationFactor: rf, + Now: time.Now(), + Stats: stats, + ReplicationFactor: rf, + QueriedIngesterNum: queriedIngesterNum, }, UserStatsTmpl, r) } diff --git a/pkg/ingester/http_admin_test.go b/pkg/ingester/http_admin_test.go index bb49b42cdc..6cc619871a 100644 --- a/pkg/ingester/http_admin_test.go +++ b/pkg/ingester/http_admin_test.go @@ -24,7 +24,7 @@ func TestUserStatsPageRendered(t *testing.T) { }, }, } - AllUserStatsRender(res, req, userStats, 3) + AllUserStatsRender(res, req, userStats, 3, 3) assert.Equal(t, http.StatusOK, res.Code) body := res.Body.String() assert.Regexp(t, "", body) diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index dd2dc4f166..2330fdec59 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -33,7 +33,7 @@ import ( "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" - "github.com/prometheus/prometheus/tsdb/wlog" + "github.com/prometheus/prometheus/util/compression" "github.com/prometheus/prometheus/util/zeropool" "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/block/metadata" @@ -213,7 +213,7 @@ func (cfg *Config) getIgnoreSeriesLimitForMetricNamesMap() map[string]struct{} { result := map[string]struct{}{} - for _, s := range strings.Split(cfg.IgnoreSeriesLimitForMetricNames, ",") { + for s := range strings.SplitSeq(cfg.IgnoreSeriesLimitForMetricNames, ",") { tr := strings.TrimSpace(s) if tr != "" { result[tr] = struct{}{} @@ -1147,15 +1147,17 @@ type extendedAppender interface { storage.GetRef } -func (i *Ingester) isLabelSetOutOfOrder(labels labels.Labels) bool { +func (i *Ingester) isLabelSetOutOfOrder(lbls labels.Labels) bool { last := "" - for _, l := range labels { + ooo := false + lbls.Range(func(l labels.Label) { if strings.Compare(last, l.Name) > 0 { - return true + ooo = true } last = l.Name - } - return false + }) + + return ooo } // Push adds metrics to a block @@ -1257,22 +1259,27 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte switch cause := errors.Cause(err); { case errors.Is(cause, storage.ErrOutOfBounds): sampleOutOfBoundsCount++ + i.validateMetrics.DiscardedSeriesTracker.Track(sampleOutOfBounds, userID, copiedLabels.Hash()) updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) }) case 
errors.Is(cause, storage.ErrOutOfOrderSample): sampleOutOfOrderCount++ + i.validateMetrics.DiscardedSeriesTracker.Track(sampleOutOfOrder, userID, copiedLabels.Hash()) updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) }) case errors.Is(cause, storage.ErrDuplicateSampleForTimestamp): newValueForTimestampCount++ + i.validateMetrics.DiscardedSeriesTracker.Track(newValueForTimestamp, userID, copiedLabels.Hash()) updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) }) case errors.Is(cause, storage.ErrTooOldSample): sampleTooOldCount++ + i.validateMetrics.DiscardedSeriesTracker.Track(sampleTooOld, userID, copiedLabels.Hash()) updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) }) case errors.Is(cause, errMaxSeriesPerUserLimitExceeded): perUserSeriesLimitCount++ + i.validateMetrics.DiscardedSeriesTracker.Track(perUserSeriesLimit, userID, copiedLabels.Hash()) updateFirstPartial(func() error { return makeLimitError(perUserSeriesLimit, i.limiter.FormatError(userID, cause, copiedLabels)) }) @@ -1285,12 +1292,17 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte case errors.Is(cause, errMaxSeriesPerMetricLimitExceeded): perMetricSeriesLimitCount++ + i.validateMetrics.DiscardedSeriesTracker.Track(perMetricSeriesLimit, userID, copiedLabels.Hash()) updateFirstPartial(func() error { return makeMetricLimitError(perMetricSeriesLimit, copiedLabels, i.limiter.FormatError(userID, cause, copiedLabels)) }) case errors.As(cause, &errMaxSeriesPerLabelSetLimitExceeded{}): perLabelSetSeriesLimitCount++ + i.validateMetrics.DiscardedSeriesTracker.Track(perLabelsetSeriesLimit, userID, copiedLabels.Hash()) + for _, matchedLabelset := range matchedLabelSetLimits { + i.validateMetrics.DiscardedSeriesPerLabelsetTracker.Track(userID, copiedLabels.Hash(), matchedLabelset.Hash, matchedLabelset.Id) + } // We only track per labelset discarded samples for throttling by labelset limit. 
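The Track calls added throughout this Push error handling record which distinct series (keyed by label hash) were discarded per reason and user. A hypothetical sketch of what such a tracker could look like; the type, its fields, and the reason string are assumptions for illustration, not the actual Cortex implementation:

```go
package main

import (
	"fmt"
	"sync"
)

// discardedSeriesTracker is an illustrative sketch: it records the set
// of distinct series hashes discarded per (reason, user), so a metric
// can later export the set sizes. Names and layout are assumed.
type discardedSeriesTracker struct {
	mu     sync.Mutex
	series map[string]map[uint64]struct{} // key: reason + "/" + user
}

func newDiscardedSeriesTracker() *discardedSeriesTracker {
	return &discardedSeriesTracker{series: map[string]map[uint64]struct{}{}}
}

func (t *discardedSeriesTracker) Track(reason, user string, hash uint64) {
	t.mu.Lock()
	defer t.mu.Unlock()
	key := reason + "/" + user
	if t.series[key] == nil {
		t.series[key] = map[uint64]struct{}{}
	}
	t.series[key][hash] = struct{}{}
}

func main() {
	tr := newDiscardedSeriesTracker()
	tr.Track("sample_out_of_order", "user-1", 42)
	tr.Track("sample_out_of_order", "user-1", 42) // same series, counted once
	tr.Track("sample_out_of_order", "user-1", 43)
	fmt.Println(len(tr.series["sample_out_of_order/user-1"])) // 2
}
```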
reasonCounter.increment(matchedLabelSetLimits, perLabelsetSeriesLimit) updateFirstPartial(func() error { @@ -1312,9 +1324,6 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte case errors.Is(cause, histogram.ErrHistogramCountMismatch): updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) }) - case errors.Is(cause, storage.ErrOOONativeHistogramsDisabled): - updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) }) - default: rollback = true } @@ -1461,7 +1470,7 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte Labels: cortexpb.FromLabelAdaptersToLabelsWithCopy(ex.Labels), } - if _, err = app.AppendExemplar(ref, nil, e); err == nil { + if _, err = app.AppendExemplar(ref, labels.EmptyLabels(), e); err == nil { succeededExemplarsCount++ continue } @@ -1746,10 +1755,7 @@ func (i *Ingester) LabelValuesStream(req *client.LabelValuesRequest, stream clie } for i := 0; i < len(resp.LabelValues); i += metadataStreamBatchSize { - j := i + metadataStreamBatchSize - if j > len(resp.LabelValues) { - j = len(resp.LabelValues) - } + j := min(i+metadataStreamBatchSize, len(resp.LabelValues)) resp := &client.LabelValuesStreamResponse{ LabelValues: resp.LabelValues[i:j], } @@ -1843,10 +1849,7 @@ func (i *Ingester) LabelNamesStream(req *client.LabelNamesRequest, stream client } for i := 0; i < len(resp.LabelNames); i += metadataStreamBatchSize { - j := i + metadataStreamBatchSize - if j > len(resp.LabelNames) { - j = len(resp.LabelNames) - } + j := min(i+metadataStreamBatchSize, len(resp.LabelNames)) resp := &client.LabelNamesStreamResponse{ LabelNames: resp.LabelNames[i:j], } @@ -2140,7 +2143,7 @@ func (i *Ingester) userStats() []UserIDStats { func (i *Ingester) AllUserStatsHandler(w http.ResponseWriter, r *http.Request) { stats := i.userStats() - AllUserStatsRender(w, r, stats, 0) + AllUserStatsRender(w, r, stats, 0, 0) } // AllUserStats returns ingestion statistics for all users known to this ingester. @@ -2266,7 +2269,7 @@ func (i *Ingester) trackInflightQueryRequest() (func(), error) { if i.resourceBasedLimiter != nil { if err := i.resourceBasedLimiter.AcceptNewRequest(); err != nil { level.Warn(i.logger).Log("msg", "failed to accept request", "err", err) - return nil, httpgrpc.Errorf(http.StatusServiceUnavailable, "failed to query: %s", limiter.ErrResourceLimitReachedStr) + return nil, limiter.ErrResourceLimitReached } } @@ -2518,9 +2521,9 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { } oooTimeWindow := i.limits.OutOfOrderTimeWindow(userID) - walCompressType := wlog.CompressionNone + walCompressType := compression.None if i.cfg.BlocksStorageConfig.TSDB.WALCompressionType != "" { - walCompressType = wlog.CompressionType(i.cfg.BlocksStorageConfig.TSDB.WALCompressionType) + walCompressType = i.cfg.BlocksStorageConfig.TSDB.WALCompressionType } // Create a new user database @@ -2542,7 +2545,6 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { EnableMemorySnapshotOnShutdown: i.cfg.BlocksStorageConfig.TSDB.MemorySnapshotOnShutdown, OutOfOrderTimeWindow: time.Duration(oooTimeWindow).Milliseconds(), OutOfOrderCapMax: i.cfg.BlocksStorageConfig.TSDB.OutOfOrderCapMax, - EnableOOONativeHistograms: true, EnableOverlappingCompaction: false, // Always let compactors handle overlapped blocks, e.g. OOO blocks. EnableNativeHistograms: true, // Always enable Native Histograms. 
Gate keeping is done through a per-tenant limit at ingestion. BlockChunkQuerierFunc: i.blockChunkQuerierFunc(userID), @@ -2578,15 +2580,7 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { // Thanos shipper requires at least 1 external label to be set. For this reason, // we set the tenant ID as external label and we'll filter it out when reading // the series from the storage. - l := labels.Labels{ - { - Name: cortex_tsdb.TenantIDExternalLabel, - Value: userID, - }, { - Name: cortex_tsdb.IngesterIDExternalLabel, - Value: i.TSDBState.shipperIngesterID, - }, - } + l := labels.FromStrings(cortex_tsdb.TenantIDExternalLabel, userID, cortex_tsdb.IngesterIDExternalLabel, i.TSDBState.shipperIngesterID) // Create a new shipper for this database if i.cfg.BlocksStorageConfig.TSDB.IsBlocksShippingEnabled() { @@ -2622,7 +2616,6 @@ func (i *Ingester) closeAllTSDB() { // Concurrently close all users TSDB for userID, userDB := range i.TSDBState.dbs { - userID := userID go func(db *userTSDB) { defer wg.Done() diff --git a/pkg/ingester/ingester_no_race_test.go b/pkg/ingester/ingester_no_race_test.go index 656a7ab28c..f6b7a28d27 100644 --- a/pkg/ingester/ingester_no_race_test.go +++ b/pkg/ingester/ingester_no_race_test.go @@ -38,7 +38,7 @@ func TestExpandedCachePostings_Race(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until the ingester is ACTIVE - test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} { + test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -48,10 +48,10 @@ func TestExpandedCachePostings_Race(t *testing.T) { labelNames := 100 seriesPerLabelName := 200 - for j := 0; j < labelNames; j++ { + for j := range labelNames { metricName := fmt.Sprintf("test_metric_%d", j) wg.Add(seriesPerLabelName * 2) - for k := 0; k < seriesPerLabelName; k++ { + for k := range seriesPerLabelName { go func() { defer wg.Done() _, err := i.Push(ctx, cortexpb.ToWriteRequest(
func() { defer wg.Done() u := fmt.Sprintf("userId_%v", i) @@ -236,8 +236,7 @@ func TestIngesterDeletionRace(t *testing.T) { wg.Wait() - ctx, c := context.WithCancel(context.Background()) - defer c() + ctx := t.Context() wg.Add(1) go func() { @@ -250,7 +249,7 @@ func TestIngesterDeletionRace(t *testing.T) { ing.closeAndDeleteIdleUserTSDBs(ctx) //nolint:errcheck }() - test.Poll(t, 5*time.Second, 0, func() interface{} { + test.Poll(t, 5*time.Second, 0, func() any { return len(ing.getTSDBUsers()) }) } @@ -295,7 +294,7 @@ func TestIngesterPerLabelsetLimitExceeded(t *testing.T) { require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing)) // Wait until it's ACTIVE - test.Poll(t, time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, time.Second, ring.ACTIVE, func() any { return ing.lifecycler.GetState() }) @@ -305,9 +304,9 @@ func TestIngesterPerLabelsetLimitExceeded(t *testing.T) { // Create first series within the limits for _, set := range limits.LimitsPerLabelSet { lbls := []string{labels.MetricName, "metric_name"} - for _, lbl := range set.LabelSet { - lbls = append(lbls, lbl.Name, lbl.Value) - } + set.LabelSet.Range(func(l labels.Label) { + lbls = append(lbls, l.Name, l.Value) + }) for i := 0; i < set.Limits.MaxSeries; i++ { _, err = ing.Push(ctx, cortexpb.ToWriteRequest( []labels.Labels{labels.FromStrings(append(lbls, "extraLabel", fmt.Sprintf("extraValue%v", i))...)}, samples, nil, nil, cortexpb.API)) @@ -330,9 +329,9 @@ func TestIngesterPerLabelsetLimitExceeded(t *testing.T) { // Should impose limits for _, set := range limits.LimitsPerLabelSet { lbls := []string{labels.MetricName, "metric_name"} - for _, lbl := range set.LabelSet { - lbls = append(lbls, lbl.Name, lbl.Value) - } + set.LabelSet.Range(func(l labels.Label) { + lbls = append(lbls, l.Name, l.Value) + }) _, err = ing.Push(ctx, cortexpb.ToWriteRequest( []labels.Labels{labels.FromStrings(append(lbls, "newLabel", "newValue")...)}, samples, nil, nil, cortexpb.API)) httpResp, ok := httpgrpc.HTTPResponseFromError(err) @@ -418,7 +417,7 @@ func TestIngesterPerLabelsetLimitExceeded(t *testing.T) { `), "cortex_ingester_usage_per_labelset", "cortex_ingester_limits_per_labelset", "cortex_discarded_samples_total", "cortex_discarded_samples_per_labelset_total")) // Adding 5 metrics with only 1 label - for i := 0; i < 5; i++ { + for i := range 5 { lbls := []string{labels.MetricName, "metric_name", "comp1", "compValue1"} _, err = ing.Push(ctx, cortexpb.ToWriteRequest( []labels.Labels{labels.FromStrings(append(lbls, "extraLabel", fmt.Sprintf("extraValue%v", i))...)}, samples, nil, nil, cortexpb.API)) @@ -427,7 +426,7 @@ func TestIngesterPerLabelsetLimitExceeded(t *testing.T) { // Adding 2 metrics with both labels (still below the limit) lbls := []string{labels.MetricName, "metric_name", "comp1", "compValue1", "comp2", "compValue2"} - for i := 0; i < 2; i++ { + for i := range 2 { _, err = ing.Push(ctx, cortexpb.ToWriteRequest( []labels.Labels{labels.FromStrings(append(lbls, "extraLabel", fmt.Sprintf("extraValue%v", i))...)}, samples, nil, nil, cortexpb.API)) require.NoError(t, err) @@ -533,7 +532,7 @@ func TestIngesterPerLabelsetLimitExceeded(t *testing.T) { tenantLimits.setLimits(userID, &limits) lbls = []string{labels.MetricName, "test_default"} - for i := 0; i < 2; i++ { + for i := range 2 { _, err = ing.Push(ctx, cortexpb.ToWriteRequest( []labels.Labels{labels.FromStrings(append(lbls, "series", strconv.Itoa(i))...)}, samples, nil, nil, cortexpb.API)) require.NoError(t, err) @@ -696,7 +695,7 
@@ func TestPushRace(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing)) // Wait until it's ACTIVE - test.Poll(t, time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, time.Second, ring.ACTIVE, func() any { return ing.lifecycler.GetState() }) @@ -710,8 +709,8 @@ func TestPushRace(t *testing.T) { numberOfSeries := 100 wg := sync.WaitGroup{} wg.Add(numberOfSeries * concurrentRequest) - for k := 0; k < numberOfSeries; k++ { - for i := 0; i < concurrentRequest; i++ { + for k := range numberOfSeries { + for range concurrentRequest { go func() { defer wg.Done() _, err := ing.Push(ctx, cortexpb.ToWriteRequest([]labels.Labels{labels.FromStrings(labels.MetricName, "foo", "userId", userID, "k", strconv.Itoa(k))}, []cortexpb.Sample{sample1}, nil, nil, cortexpb.API)) @@ -759,7 +758,7 @@ func TestIngesterUserLimitExceeded(t *testing.T) { userID := "1" // Series - labels1 := labels.Labels{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}} + labels1 := labels.FromStrings(labels.MetricName, "testmetric", "foo", "bar") sample1 := cortexpb.Sample{ TimestampMs: 0, Value: 1, @@ -768,7 +767,7 @@ func TestIngesterUserLimitExceeded(t *testing.T) { TimestampMs: 1, Value: 2, } - labels3 := labels.Labels{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "biz"}} + labels3 := labels.FromStrings(labels.MetricName, "testmetric", "foo", "biz") sample3 := cortexpb.Sample{ TimestampMs: 1, Value: 3, @@ -789,7 +788,7 @@ func TestIngesterUserLimitExceeded(t *testing.T) { require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing)) // Wait until it's ACTIVE - test.Poll(t, time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, time.Second, ring.ACTIVE, func() any { return ing.lifecycler.GetState() }) @@ -878,8 +877,8 @@ func TestIngesterUserLimitExceededForNativeHistogram(t *testing.T) { userID := "1" // Series - labels1 := labels.Labels{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}} - labels3 := labels.Labels{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "biz"}} + labels1 := labels.FromStrings(labels.MetricName, "testmetric", "foo", "bar") + labels3 := labels.FromStrings(labels.MetricName, "testmetric", "foo", "biz") sampleNativeHistogram1 := cortexpb.HistogramToHistogramProto(0, tsdbutil.GenerateTestHistogram(1)) sampleNativeHistogram2 := cortexpb.HistogramToHistogramProto(1, tsdbutil.GenerateTestHistogram(2)) sampleNativeHistogram3 := cortexpb.HistogramToHistogramProto(0, tsdbutil.GenerateTestHistogram(3)) @@ -900,7 +899,7 @@ func TestIngesterUserLimitExceededForNativeHistogram(t *testing.T) { require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing)) // Wait until it's ACTIVE - test.Poll(t, time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, time.Second, ring.ACTIVE, func() any { return ing.lifecycler.GetState() }) @@ -957,14 +956,20 @@ func TestIngesterUserLimitExceededForNativeHistogram(t *testing.T) { } func benchmarkData(nSeries int) (allLabels []labels.Labels, allSamples []cortexpb.Sample) { - for j := 0; j < nSeries; j++ { - labels := chunk.BenchmarkLabels.Copy() - for i := range labels { - if labels[i].Name == "cpu" { - labels[i].Value = fmt.Sprintf("cpu%02d", j) + for j := range nSeries { + lbls := chunk.BenchmarkLabels.Copy() + + builder := labels.NewBuilder(labels.EmptyLabels()) + 
lbls.Range(func(l labels.Label) { + val := l.Value + if l.Name == "cpu" { + val = fmt.Sprintf("cpu%02d", j) } - } - allLabels = append(allLabels, labels) + + builder.Set(l.Name, val) + }) + + allLabels = append(allLabels, builder.Labels()) allSamples = append(allSamples, cortexpb.Sample{TimestampMs: 0, Value: float64(j)}) } return @@ -978,7 +983,7 @@ func TestIngesterMetricLimitExceeded(t *testing.T) { limits.MaxLocalMetadataPerMetric = 1 userID := "1" - labels1 := labels.Labels{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}} + labels1 := labels.FromStrings(labels.MetricName, "testmetric", "foo", "bar") sample1 := cortexpb.Sample{ TimestampMs: 0, Value: 1, @@ -987,7 +992,7 @@ func TestIngesterMetricLimitExceeded(t *testing.T) { TimestampMs: 1, Value: 2, } - labels3 := labels.Labels{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "biz"}} + labels3 := labels.FromStrings(labels.MetricName, "testmetric", "foo", "biz") sample3 := cortexpb.Sample{ TimestampMs: 1, Value: 3, @@ -1009,7 +1014,7 @@ func TestIngesterMetricLimitExceeded(t *testing.T) { require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing)) // Wait until it's ACTIVE - test.Poll(t, time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, time.Second, ring.ACTIVE, func() any { return ing.lifecycler.GetState() }) @@ -2050,7 +2055,7 @@ func TestIngester_Push(t *testing.T) { ctx := user.InjectOrgID(context.Background(), userID) // Wait until the ingester is ACTIVE - test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} { + test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -2280,7 +2285,7 @@ func TestIngester_PushNativeHistogramErrors(t *testing.T) { ctx := user.InjectOrgID(context.Background(), userID) // Wait until the ingester is ACTIVE - test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} { + test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -2319,7 +2324,7 @@ func TestIngester_Push_ShouldCorrectlyTrackMetricsInMultiTenantScenario(t *testi defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until the ingester is ACTIVE - test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} { + test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -2403,7 +2408,7 @@ func TestIngester_Push_DecreaseInactiveSeries(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until the ingester is ACTIVE - test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} { + test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -2466,19 +2471,19 @@ func TestIngester_Push_OutOfOrderLabels(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) ctx := user.InjectOrgID(context.Background(), "test-user") - outOfOrderLabels := labels.Labels{ + outOfOrderLabels := []cortexpb.LabelAdapter{ {Name: labels.MetricName, Value: "test_metric"}, {Name: "c", Value: "3"}, - {Name: "a", Value: "1"}, // Out of order (a comes before c) + {Name: "a", Value: "1"}, } - req, _ := mockWriteRequest(t, outOfOrderLabels, 1, 2) + req, _ := mockWriteRequest(t, 
cortexpb.FromLabelAdaptersToLabels(outOfOrderLabels), 1, 2) _, err = i.Push(ctx, req) require.Error(t, err) require.Contains(t, err.Error(), "out-of-order label set found") @@ -2511,7 +2516,7 @@ func benchmarkIngesterPush(b *testing.B, limits validation.Limits, errorsExpecte defer services.StopAndAwaitTerminated(context.Background(), ingester) //nolint:errcheck // Wait until the ingester is ACTIVE - test.Poll(b, 100*time.Millisecond, ring.ACTIVE, func() interface{} { + test.Poll(b, 100*time.Millisecond, ring.ACTIVE, func() any { return ingester.lifecycler.GetState() }) @@ -2536,10 +2541,9 @@ func benchmarkIngesterPush(b *testing.B, limits validation.Limits, errorsExpecte allLabels, allSamples := benchmarkData(series) - b.ResetTimer() - for iter := 0; iter < b.N; iter++ { + for iter := 0; b.Loop(); iter++ { // Bump the timestamp on each of our test samples each time round the loop - for j := 0; j < samples; j++ { + for j := range samples { for i := range allSamples { allSamples[i].TimestampMs = startTime + int64(iter*samples+j+1) } @@ -2599,7 +2603,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) { // Push a single time series to set the TSDB min time. currTimeReq := cortexpb.ToWriteRequest( - []labels.Labels{{{Name: labels.MetricName, Value: metricName}}}, + []labels.Labels{labels.FromStrings(labels.MetricName, metricName)}, []cortexpb.Sample{{Value: 1, TimestampMs: util.TimeToMillis(time.Now())}}, nil, nil, @@ -2611,7 +2615,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { expectedErr := storage.ErrOutOfBounds.Error() // Push out of bound samples. - for n := 0; n < b.N; n++ { + for b.Loop() { _, err := ingester.Push(ctx, cortexpb.ToWriteRequest(metrics, samples, nil, nil, cortexpb.API)) // nolint:errcheck verifyErrorString(b, err, expectedErr) @@ -2622,9 +2626,9 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool { return true }, beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) { // For each series, push a single sample with a timestamp greater than next pushes. - for i := 0; i < numSeriesPerRequest; i++ { + for i := range numSeriesPerRequest { currTimeReq := cortexpb.ToWriteRequest( - []labels.Labels{{{Name: labels.MetricName, Value: metricName}, {Name: "cardinality", Value: strconv.Itoa(i)}}}, + []labels.Labels{labels.FromStrings(labels.MetricName, metricName, "cardinality", strconv.Itoa(i))}, []cortexpb.Sample{{Value: 1, TimestampMs: sampleTimestamp + 1}}, nil, nil, @@ -2638,7 +2642,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { expectedErr := storage.ErrOutOfOrderSample.Error() // Push out of order samples. - for n := 0; n < b.N; n++ { + for b.Loop() { _, err := ingester.Push(ctx, cortexpb.ToWriteRequest(metrics, samples, nil, nil, cortexpb.API)) // nolint:errcheck verifyErrorString(b, err, expectedErr) @@ -2663,7 +2667,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { }, runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) { // Push series with a different name than the one already pushed. 
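The benchmark hunks in this file replace for n := 0; n < b.N; n++ with Go 1.24's b.Loop, as the change immediately below shows; the explicit b.ResetTimer calls also become redundant, since timing starts at Loop's first call and setup before the loop is excluded from the measurement. A minimal sketch of the migrated shape, with an illustrative benchmark body:

```go
package bench_test

import (
	"strconv"
	"testing"
)

// Illustrative benchmark showing the b.N -> b.Loop migration.
// b.Loop (Go 1.24+) keeps the loop running until the benchmark has
// collected enough samples; anything before the loop is not timed.
func BenchmarkItoa(b *testing.B) {
	n := 12345 // setup; excluded from the measurement
	b.ReportAllocs()
	for b.Loop() {
		_ = strconv.Itoa(n)
	}
}
```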
- for n := 0; n < b.N; n++ { + for b.Loop() { _, err := ingester.Push(ctx, cortexpb.ToWriteRequest(metrics, samples, nil, nil, cortexpb.API)) // nolint:errcheck verifyErrorString(b, err, "per-user series limit") } @@ -2687,7 +2691,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { }, runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) { // Push series with different labels than the one already pushed. - for n := 0; n < b.N; n++ { + for b.Loop() { _, err := ingester.Push(ctx, cortexpb.ToWriteRequest(metrics, samples, nil, nil, cortexpb.API)) // nolint:errcheck verifyErrorString(b, err, "per-metric series limit") } @@ -2710,7 +2714,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { }, runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) { // Push series with different labels than the one already pushed. - for n := 0; n < b.N; n++ { + for b.Loop() { _, err := ingester.Push(ctx, cortexpb.ToWriteRequest(metrics, samples, nil, nil, cortexpb.API)) verifyErrorString(b, err, "push rate reached") } @@ -2732,7 +2736,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { }, runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) { // Push series with different labels than the one already pushed. - for n := 0; n < b.N; n++ { + for b.Loop() { _, err := ingester.Push(ctx, cortexpb.ToWriteRequest(metrics, samples, nil, nil, cortexpb.API)) verifyErrorString(b, err, "max tenants limit reached") } @@ -2751,7 +2755,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { require.NoError(b, err) }, runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) { - for n := 0; n < b.N; n++ { + for b.Loop() { _, err := ingester.Push(ctx, cortexpb.ToWriteRequest(metrics, samples, nil, nil, cortexpb.API)) verifyErrorString(b, err, "max series limit reached") } @@ -2769,7 +2773,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { ingester.inflightPushRequests.Inc() }, runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) { - for n := 0; n < b.N; n++ { + for b.Loop() { _, err := ingester.Push(ctx, cortexpb.ToWriteRequest(metrics, samples, nil, nil, cortexpb.API)) verifyErrorString(b, err, "too many inflight push requests") } @@ -2811,7 +2815,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { defer services.StopAndAwaitTerminated(context.Background(), ingester) //nolint:errcheck // Wait until the ingester is ACTIVE - test.Poll(b, 100*time.Millisecond, ring.ACTIVE, func() interface{} { + test.Poll(b, 100*time.Millisecond, ring.ACTIVE, func() any { return ingester.lifecycler.GetState() }) @@ -2821,7 +2825,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { metrics := make([]labels.Labels, 0, scenario.numSeriesPerRequest) samples := make([]cortexpb.Sample, 0, scenario.numSeriesPerRequest) for i := 0; i < scenario.numSeriesPerRequest; i++ { - metrics = append(metrics, labels.Labels{{Name: labels.MetricName, Value: metricName}, {Name: "cardinality", Value: strconv.Itoa(i)}}) + metrics = append(metrics, labels.FromStrings(labels.MetricName, metricName, "cardinality", strconv.Itoa(i))) samples = append(samples, cortexpb.Sample{Value: float64(i), TimestampMs: sampleTimestamp}) } @@ -2831,7 +2835,6 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { start := make(chan struct{}) b.ReportAllocs() - b.ResetTimer() for c := 0; c < 
scenario.numConcurrentClients; c++ { go func() { @@ -2857,9 +2860,9 @@ func Test_Ingester_LabelNames(t *testing.T) { value float64 timestamp int64 }{ - {labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "200"}}, 1, 100000}, - {labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "500"}}, 1, 110000}, - {labels.Labels{{Name: labels.MetricName, Value: "test_2"}}, 2, 200000}, + {labels.FromStrings("__name__", "test_1", "route", "get_user", "status", "200"), 1, 100000}, + {labels.FromStrings("__name__", "test_1", "route", "get_user", "status", "500"), 1, 110000}, + {labels.FromStrings("__name__", "test_2"), 2, 200000}, } expected := []string{"__name__", "route", "status"} @@ -2871,7 +2874,7 @@ func Test_Ingester_LabelNames(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -2913,9 +2916,9 @@ func Test_Ingester_LabelValues(t *testing.T) { value float64 timestamp int64 }{ - {labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "200"}}, 1, 100000}, - {labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "500"}}, 1, 110000}, - {labels.Labels{{Name: labels.MetricName, Value: "test_2"}}, 2, 200000}, + {labels.FromStrings("__name__", "test_1", "route", "get_user", "status", "200"), 1, 100000}, + {labels.FromStrings("__name__", "test_1", "route", "get_user", "status", "500"), 1, 110000}, + {labels.FromStrings("__name__", "test_2"), 2, 200000}, } expected := map[string][]string{ @@ -2932,7 +2935,7 @@ func Test_Ingester_LabelValues(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -2982,7 +2985,7 @@ func Test_Ingester_LabelValue_MaxInflightQueryRequest(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -2991,7 +2994,7 @@ func Test_Ingester_LabelValue_MaxInflightQueryRequest(t *testing.T) { // Mock request ctx := user.InjectOrgID(context.Background(), "test") - wreq, _ := mockWriteRequest(t, labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "200"}}, 1, 100000) + wreq, _ := mockWriteRequest(t, labels.FromStrings("__name__", "test_1", "route", "get_user", "status", "200"), 1, 100000) _, err = i.Push(ctx, wreq) require.NoError(t, err) @@ -3007,9 +3010,9 @@ func Test_Ingester_Query(t *testing.T) { value float64 timestamp int64 }{ - {labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "200"}}, 1, 100000}, - {labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "500"}}, 1, 110000}, - {labels.Labels{{Name: labels.MetricName, Value: "test_2"}}, 2, 200000}, + 
{labels.FromStrings("__name__", "test_1", "route", "get_user", "status", "200"), 1, 100000}, + {labels.FromStrings("__name__", "test_1", "route", "get_user", "status", "500"), 1, 110000}, + {labels.FromStrings("__name__", "test_2"), 2, 200000}, } tests := map[string]struct { @@ -3098,7 +3101,7 @@ func Test_Ingester_Query(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -3141,7 +3144,7 @@ func Test_Ingester_Query_MaxInflightQueryRequest(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -3150,7 +3153,7 @@ func Test_Ingester_Query_MaxInflightQueryRequest(t *testing.T) { // Mock request ctx := user.InjectOrgID(context.Background(), "test") - wreq, _ := mockWriteRequest(t, labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "200"}}, 1, 100000) + wreq, _ := mockWriteRequest(t, labels.FromStrings("__name__", "test_1", "route", "get_user", "status", "200"), 1, 100000) _, err = i.Push(ctx, wreq) require.NoError(t, err) @@ -3191,7 +3194,7 @@ func Test_Ingester_Query_ResourceThresholdBreached(t *testing.T) { value float64 timestamp int64 }{ - {labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "200"}}, 1, 100000}, + {labels.FromStrings("__name__", "test_1", "route", "get_user", "status", "200"), 1, 100000}, } i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), prometheus.NewRegistry()) @@ -3207,7 +3210,7 @@ func Test_Ingester_Query_ResourceThresholdBreached(t *testing.T) { require.NoError(t, err) // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -3224,8 +3227,9 @@ func Test_Ingester_Query_ResourceThresholdBreached(t *testing.T) { s := &mockQueryStreamServer{ctx: ctx} err = i.QueryStream(rreq, s) require.Error(t, err) - exhaustedErr := limiter.ResourceLimitReachedError{} - require.ErrorContains(t, err, exhaustedErr.Error()) + + // Expected error from isRetryableError in blocks_store_queryable.go + require.ErrorIs(t, err, limiter.ErrResourceLimitReached) } func TestIngester_LabelValues_ShouldNotCreateTSDBIfDoesNotExists(t *testing.T) { @@ -3361,12 +3365,12 @@ func Test_Ingester_MetricsForLabelMatchers(t *testing.T) { value float64 timestamp int64 }{ - {labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "200"}}, 1, 100000}, - {labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "500"}}, 1, 110000}, - {labels.Labels{{Name: labels.MetricName, Value: "test_2"}}, 2, 200000}, + {labels.FromStrings("__name__", "test_1", "status", "200"), 1, 100000}, + {labels.FromStrings("__name__", "test_1", "status", "500"), 1, 110000}, + {labels.FromStrings("__name__", "test_2"), 2, 200000}, // The two following series have the same FastFingerprint=e002a3a451262627 - {labels.Labels{{Name: labels.MetricName, Value: "collision"}, {Name: "app", Value: "l"}, {Name: "uniq0", Value: "0"}, {Name: "uniq1", Value: "1"}}, 1, 300000}, - 
{labels.Labels{{Name: labels.MetricName, Value: "collision"}, {Name: "app", Value: "m"}, {Name: "uniq0", Value: "1"}, {Name: "uniq1", Value: "1"}}, 1, 300000}, + {labels.FromStrings("__name__", "collision", "app", "l", "uniq0", "0", "uniq1", "1"), 1, 300000}, + {labels.FromStrings("__name__", "collision", "app", "m", "uniq0", "1", "uniq1", "1"), 1, 300000}, } tests := map[string]struct { @@ -3509,7 +3513,7 @@ func Test_Ingester_MetricsForLabelMatchers(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -3524,7 +3528,6 @@ func Test_Ingester_MetricsForLabelMatchers(t *testing.T) { // Run tests for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { req := &client.MetricsForLabelMatchersRequest{ @@ -3592,10 +3595,9 @@ func Benchmark_Ingester_MetricsForLabelMatchers(b *testing.B) { // fetching labels from blocks. i.Flush() - b.ResetTimer() b.ReportAllocs() - for n := 0; n < b.N; n++ { + for b.Loop() { req := &client.MetricsForLabelMatchersRequest{ StartTimestampMs: math.MinInt64, EndTimestampMs: math.MaxInt64, @@ -3623,7 +3625,7 @@ func createIngesterWithSeries(t testing.TB, userID string, numSeries, numSamples }) // Wait until it's ACTIVE. - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -3638,11 +3640,8 @@ func createIngesterWithSeries(t testing.TB, userID string, numSeries, numSamples metrics := make([]labels.Labels, 0, batchSize) samples := make([]cortexpb.Sample, 0, batchSize) - for s := 0; s < batchSize; s++ { - metrics = append(metrics, labels.Labels{ - {Name: labels.MetricName, Value: fmt.Sprintf("test_%d", o+s)}, - }) - + for s := range batchSize { + metrics = append(metrics, labels.FromStrings("__name__", fmt.Sprintf("test_%d", o+s))) samples = append(samples, cortexpb.Sample{ TimestampMs: ts, Value: 1, @@ -3671,13 +3670,13 @@ func TestIngester_QueryStream(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE. - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) // Push series. ctx := user.InjectOrgID(context.Background(), userID) - lbls := labels.Labels{{Name: labels.MetricName, Value: "foo"}} + lbls := labels.FromStrings(labels.MetricName, "foo") var ( req *cortexpb.WriteRequest expectedResponseChunks *client.QueryStreamResponse @@ -3755,7 +3754,7 @@ func TestIngester_QueryStreamManySamplesChunks(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE. 
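These ACTIVE-state waits all go through test.Poll, whose probe closure changes from func() interface{} to func() any throughout this diff. A simplified stand-in for the polling idiom, assuming a DeepEqual comparison; the real helper lives in Cortex's test utilities and may differ in detail:

```go
package polltest

import (
	"reflect"
	"testing"
	"time"
)

// poll re-evaluates fn until it returns the expected value or the
// timeout elapses. Note the func() any probe, matching the new
// signature used in the tests above and below.
func poll(t *testing.T, timeout time.Duration, expected any, fn func() any) {
	t.Helper()
	deadline := time.Now().Add(timeout)
	for {
		got := fn()
		if reflect.DeepEqual(expected, got) {
			return
		}
		if time.Now().After(deadline) {
			t.Fatalf("poll timed out: got %v, want %v", got, expected)
		}
		time.Sleep(10 * time.Millisecond)
	}
}
```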
- test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -3765,7 +3764,7 @@ func TestIngester_QueryStreamManySamplesChunks(t *testing.T) { const samplesCount = 1000000 samples := make([]cortexpb.Sample, 0, samplesCount) - for i := 0; i < samplesCount; i++ { + for i := range samplesCount { samples = append(samples, cortexpb.Sample{ Value: float64(i), TimestampMs: int64(i), @@ -3773,15 +3772,15 @@ func TestIngester_QueryStreamManySamplesChunks(t *testing.T) { } // 100k samples in chunks use about 154 KiB, - _, err = i.Push(ctx, writeRequestSingleSeries(labels.Labels{{Name: labels.MetricName, Value: "foo"}, {Name: "l", Value: "1"}}, samples[0:100000])) + _, err = i.Push(ctx, writeRequestSingleSeries(labels.FromStrings("__name__", "foo", "l", "1"), samples[0:100000])) require.NoError(t, err) // 1M samples in chunks use about 1.51 MiB, - _, err = i.Push(ctx, writeRequestSingleSeries(labels.Labels{{Name: labels.MetricName, Value: "foo"}, {Name: "l", Value: "2"}}, samples)) + _, err = i.Push(ctx, writeRequestSingleSeries(labels.FromStrings("__name__", "foo", "l", "2"), samples)) require.NoError(t, err) // 500k samples in chunks need 775 KiB, - _, err = i.Push(ctx, writeRequestSingleSeries(labels.Labels{{Name: labels.MetricName, Value: "foo"}, {Name: "l", Value: "3"}}, samples[0:500000])) + _, err = i.Push(ctx, writeRequestSingleSeries(labels.FromStrings("__name__", "foo", "l", "3"), samples[0:500000])) require.NoError(t, err) // Create a GRPC server used to query back the data. @@ -3952,7 +3951,7 @@ func benchmarkQueryStream(b *testing.B, samplesCount, seriesCount int) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE. 
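The sample-generation loops in these hunks use Go 1.22's range-over-int form: for i := range n iterates i from 0 to n-1, and the index can be dropped entirely when unused. A tiny sketch:

```go
package main

import "fmt"

func main() {
	// Go 1.22+: ranging over an int iterates 0..n-1.
	samples := make([]float64, 0, 3)
	for i := range 3 {
		samples = append(samples, float64(i))
	}
	// Index unused: drop it entirely.
	for range 2 {
		samples = append(samples, -1)
	}
	fmt.Println(samples) // [0 1 2 -1 -1]
}
```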
- test.Poll(b, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(b, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -3961,15 +3960,15 @@ func benchmarkQueryStream(b *testing.B, samplesCount, seriesCount int) { samples := make([]cortexpb.Sample, 0, samplesCount) - for i := 0; i < samplesCount; i++ { + for i := range samplesCount { samples = append(samples, cortexpb.Sample{ Value: float64(i), TimestampMs: int64(i), }) } - for s := 0; s < seriesCount; s++ { - _, err = i.Push(ctx, writeRequestSingleSeries(labels.Labels{{Name: labels.MetricName, Value: "foo"}, {Name: "l", Value: strconv.Itoa(s)}}, samples)) + for s := range seriesCount { + _, err = i.Push(ctx, writeRequestSingleSeries(labels.FromStrings("__name__", "foo", "l", strconv.Itoa(s)), samples)) require.NoError(b, err) } @@ -3986,10 +3985,9 @@ func benchmarkQueryStream(b *testing.B, samplesCount, seriesCount int) { mockStream := &mockQueryStreamServer{ctx: ctx} - b.ResetTimer() b.ReportAllocs() - for ix := 0; ix < b.N; ix++ { + for b.Loop() { err := i.QueryStream(req, mockStream) require.NoError(b, err) } @@ -4332,7 +4330,7 @@ func TestIngester_shipBlocks(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -4378,7 +4376,7 @@ func TestIngester_dontShipBlocksWhenTenantDeletionMarkerIsPresent(t *testing.T) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -4431,7 +4429,7 @@ func TestIngester_seriesCountIsCorrectAfterClosingTSDBForDeletedTenant(t *testin defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -4469,7 +4467,7 @@ func TestIngester_sholdUpdateCacheShippedBlocks(t *testing.T) { defer services.StopAndAwaitTerminated(ctx, i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -4509,7 +4507,7 @@ func TestIngester_closeAndDeleteUserTSDBIfIdle_shouldNotCloseTSDBIfShippingIsInP defer services.StopAndAwaitTerminated(ctx, i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -4530,7 +4528,7 @@ func TestIngester_closeAndDeleteUserTSDBIfIdle_shouldNotCloseTSDBIfShippingIsInP go i.shipBlocks(ctx, nil) // Wait until shipping starts. 
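Across these test hunks, labels.Labels struct literals give way to labels.FromStrings, iteration happens via Range, and mutation via labels.NewBuilder; this keeps the tests independent of the label set's internal representation. A minimal sketch of the three idioms:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	// Construct without assuming the internal representation:
	// FromStrings takes name/value pairs and sorts them by name.
	lbls := labels.FromStrings("__name__", "test_1", "route", "get_user", "status", "200")

	// Iterate with Range instead of indexing a slice.
	lbls.Range(func(l labels.Label) {
		fmt.Printf("%s=%q\n", l.Name, l.Value)
	})

	// Rewrite one value through a Builder, as benchmarkData now does.
	b := labels.NewBuilder(lbls)
	b.Set("status", "500")
	fmt.Println(b.Labels().Get("status")) // 500
}
```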
- test.Poll(t, 1*time.Second, activeShipping, func() interface{} { + test.Poll(t, 1*time.Second, activeShipping, func() any { db.stateMtx.RLock() defer db.stateMtx.RUnlock() return db.state @@ -4551,7 +4549,7 @@ func TestIngester_closingAndOpeningTsdbConcurrently(t *testing.T) { defer services.StopAndAwaitTerminated(ctx, i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -4576,7 +4574,7 @@ func TestIngester_closingAndOpeningTsdbConcurrently(t *testing.T) { } }() - for k := 0; k < iterations; k++ { + for range iterations { i.closeAndDeleteUserTSDBIfIdle(userID) } @@ -4604,7 +4602,7 @@ func TestIngester_idleCloseEmptyTSDB(t *testing.T) { defer services.StopAndAwaitTerminated(ctx, i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -4653,7 +4651,7 @@ func TestIngester_ReadNotFailWhenTSDBIsBeingDeleted(t *testing.T) { defer services.StopAndAwaitTerminated(ctx, i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -4709,7 +4707,7 @@ func TestIngester_invalidSamplesDontChangeLastUpdateTime(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -4717,7 +4715,7 @@ func TestIngester_invalidSamplesDontChangeLastUpdateTime(t *testing.T) { sampleTimestamp := int64(model.Now()) { - req, _ := mockWriteRequest(t, labels.Labels{{Name: labels.MetricName, Value: "test"}}, 0, sampleTimestamp) + req, _ := mockWriteRequest(t, labels.FromStrings("__name__", "test"), 0, sampleTimestamp) _, err = i.Push(ctx, req) require.NoError(t, err) } @@ -4727,13 +4725,13 @@ func TestIngester_invalidSamplesDontChangeLastUpdateTime(t *testing.T) { lastUpdate := db.lastUpdate.Load() // Wait until 1 second passes. - test.Poll(t, 1*time.Second, time.Now().Unix()+1, func() interface{} { + test.Poll(t, 1*time.Second, time.Now().Unix()+1, func() any { return time.Now().Unix() }) // Push another sample to the same metric and timestamp, with different value. We expect to get error. 
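Every wait in these tests goes through the same `test.Poll(t, timeout, expected, func() any)` helper; the only change in these hunks is the `interface{}` -> `any` spelling of its callback. A rough reimplementation of the pattern as a sketch (the real helper lives in a Cortex test util package, also accepts `*testing.B`, and differs in detail):

package test

import (
	"reflect"
	"testing"
	"time"
)

// poll re-evaluates f until it returns expected or the timeout expires.
func poll(t *testing.T, timeout time.Duration, expected any, f func() any) {
	t.Helper()
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if reflect.DeepEqual(expected, f()) {
			return
		}
		time.Sleep(10 * time.Millisecond)
	}
	t.Fatalf("poll timed out, last value: %v", f())
}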
{ - req, _ := mockWriteRequest(t, labels.Labels{{Name: labels.MetricName, Value: "test"}}, 1, sampleTimestamp) + req, _ := mockWriteRequest(t, labels.FromStrings("__name__", "test"), 1, sampleTimestamp) _, err = i.Push(ctx, req) require.Error(t, err) } @@ -4948,7 +4946,7 @@ func TestIngester_flushing(t *testing.T) { }) // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -4975,7 +4973,7 @@ func TestIngester_ForFlush(t *testing.T) { }) // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -5031,9 +5029,10 @@ func Test_Ingester_UserStats(t *testing.T) { value float64 timestamp int64 }{ - {labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "200"}}, 1, 100000}, - {labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "500"}}, 1, 110000}, - {labels.Labels{{Name: labels.MetricName, Value: "test_2"}}, 2, 200000}, + + {labels.FromStrings("__name__", "test_1", "route", "get_user", "status", "200"), 1, 100000}, + {labels.FromStrings("__name__", "test_1", "route", "get_user", "status", "500"), 1, 110000}, + {labels.FromStrings("__name__", "test_2"), 2, 200000}, } // Create ingester @@ -5043,7 +5042,7 @@ func Test_Ingester_UserStats(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -5077,11 +5076,11 @@ func Test_Ingester_AllUserStats(t *testing.T) { value float64 timestamp int64 }{ - {"user-1", labels.Labels{{Name: labels.MetricName, Value: "test_1_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "200"}}, 1, 100000}, - {"user-1", labels.Labels{{Name: labels.MetricName, Value: "test_1_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "500"}}, 1, 110000}, - {"user-1", labels.Labels{{Name: labels.MetricName, Value: "test_1_2"}}, 2, 200000}, - {"user-2", labels.Labels{{Name: labels.MetricName, Value: "test_2_1"}}, 2, 200000}, - {"user-2", labels.Labels{{Name: labels.MetricName, Value: "test_2_2"}}, 2, 200000}, + {"user-1", labels.FromStrings("__name__", "test_1_1", "route", "get_user", "status", "200"), 1, 100000}, + {"user-1", labels.FromStrings("__name__", "test_1_1", "route", "get_user", "status", "500"), 1, 110000}, + {"user-1", labels.FromStrings("__name__", "test_1_2"), 2, 200000}, + {"user-2", labels.FromStrings("__name__", "test_2_1"), 2, 200000}, + {"user-2", labels.FromStrings("__name__", "test_2_2"), 2, 200000}, } // Create ingester @@ -5091,7 +5090,7 @@ func Test_Ingester_AllUserStats(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) for _, series := range series { @@ -5145,11 +5144,11 @@ func Test_Ingester_AllUserStatsHandler(t *testing.T) { value float64 timestamp int64 }{ - {"user-1", labels.Labels{{Name: labels.MetricName, Value: "test_1_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "200"}}, 1, 100000}, - 
{"user-1", labels.Labels{{Name: labels.MetricName, Value: "test_1_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "500"}}, 1, 110000}, - {"user-1", labels.Labels{{Name: labels.MetricName, Value: "test_1_2"}}, 2, 200000}, - {"user-2", labels.Labels{{Name: labels.MetricName, Value: "test_2_1"}}, 2, 200000}, - {"user-2", labels.Labels{{Name: labels.MetricName, Value: "test_2_2"}}, 2, 200000}, + {"user-1", labels.FromStrings("__name__", "test_1_1", "route", "get_user", "status", "200"), 1, 100000}, + {"user-1", labels.FromStrings("__name__", "test_1_1", "route", "get_user", "status", "500"), 1, 110000}, + {"user-1", labels.FromStrings("__name__", "test_1_2"), 2, 200000}, + {"user-2", labels.FromStrings("__name__", "test_2_1"), 2, 200000}, + {"user-2", labels.FromStrings("__name__", "test_2_2"), 2, 200000}, } // Create ingester @@ -5159,7 +5158,7 @@ func Test_Ingester_AllUserStatsHandler(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) for _, series := range series { @@ -5235,7 +5234,7 @@ func TestIngesterCompactIdleBlock(t *testing.T) { }) // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -5317,7 +5316,7 @@ func TestIngesterCompactAndCloseIdleTSDB(t *testing.T) { }) // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -5358,7 +5357,7 @@ func TestIngesterCompactAndCloseIdleTSDB(t *testing.T) { `), metricsToCheck...)) // Wait until TSDB has been closed and removed. - test.Poll(t, 10*time.Second, 0, func() interface{} { + test.Poll(t, 10*time.Second, 0, func() any { i.stoppedMtx.Lock() defer i.stoppedMtx.Unlock() return len(i.TSDBState.dbs) @@ -5424,7 +5423,7 @@ func verifyCompactedHead(t *testing.T, i *Ingester, expected bool) { func pushSingleSampleWithMetadata(t *testing.T, i *Ingester) { ctx := user.InjectOrgID(context.Background(), userID) - req, _ := mockWriteRequest(t, labels.Labels{{Name: labels.MetricName, Value: "test"}}, 0, util.TimeToMillis(time.Now())) + req, _ := mockWriteRequest(t, labels.FromStrings("__name__", "test"), 0, util.TimeToMillis(time.Now())) req.Metadata = append(req.Metadata, &cortexpb.MetricMetadata{MetricFamilyName: "test", Help: "a help for metric", Unit: "", Type: cortexpb.COUNTER}) _, err := i.Push(ctx, req) require.NoError(t, err) @@ -5432,7 +5431,7 @@ func pushSingleSampleWithMetadata(t *testing.T, i *Ingester) { func pushSingleSampleAtTime(t *testing.T, i *Ingester, ts int64) { ctx := user.InjectOrgID(context.Background(), userID) - req, _ := mockWriteRequest(t, labels.Labels{{Name: labels.MetricName, Value: "test"}}, 0, ts) + req, _ := mockWriteRequest(t, labels.FromStrings("__name__", "test"), 0, ts) _, err := i.Push(ctx, req) require.NoError(t, err) } @@ -5461,8 +5460,8 @@ func TestHeadCompactionOnStartup(t *testing.T) { db.DisableCompactions() head := db.Head() - l := labels.Labels{{Name: "n", Value: "v"}} - for i := 0; i < numFullChunks; i++ { + l := labels.FromStrings("n", "v") + for i := range numFullChunks { // Not using db.Appender() as it checks for compaction. 
app := head.Appender(context.Background()) _, err := app.Append(0, l, int64(i)*chunkRange+1, 9.99) @@ -5519,7 +5518,7 @@ func TestIngester_CloseTSDBsOnShutdown(t *testing.T) { }) // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -5558,7 +5557,7 @@ func TestIngesterNotDeleteUnshippedBlocks(t *testing.T) { }) // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -5570,8 +5569,8 @@ func TestIngesterNotDeleteUnshippedBlocks(t *testing.T) { // Push some data to create 3 blocks. ctx := user.InjectOrgID(context.Background(), userID) - for j := int64(0); j < 5; j++ { - req, _ := mockWriteRequest(t, labels.Labels{{Name: labels.MetricName, Value: "test"}}, 0, j*chunkRangeMilliSec) + for j := range int64(5) { + req, _ := mockWriteRequest(t, labels.FromStrings(labels.MetricName, "test"), 0, j*chunkRangeMilliSec) _, err := i.Push(ctx, req) require.NoError(t, err) } @@ -5599,7 +5598,7 @@ func TestIngesterNotDeleteUnshippedBlocks(t *testing.T) { // Add more samples that could trigger another compaction and hence reload of blocks. for j := int64(5); j < 6; j++ { - req, _ := mockWriteRequest(t, labels.Labels{{Name: labels.MetricName, Value: "test"}}, 0, j*chunkRangeMilliSec) + req, _ := mockWriteRequest(t, labels.FromStrings(labels.MetricName, "test"), 0, j*chunkRangeMilliSec) _, err := i.Push(ctx, req) require.NoError(t, err) } @@ -5627,7 +5626,7 @@ func TestIngesterNotDeleteUnshippedBlocks(t *testing.T) { // Add more samples that could trigger another compaction and hence reload of blocks. for j := int64(6); j < 7; j++ { - req, _ := mockWriteRequest(t, labels.Labels{{Name: labels.MetricName, Value: "test"}}, 0, j*chunkRangeMilliSec) + req, _ := mockWriteRequest(t, labels.FromStrings(labels.MetricName, "test"), 0, j*chunkRangeMilliSec) _, err := i.Push(ctx, req) require.NoError(t, err) } @@ -5660,7 +5659,7 @@ func TestIngesterPushErrorDuringForcedCompaction(t *testing.T) { }) // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -5674,7 +5673,7 @@ func TestIngesterPushErrorDuringForcedCompaction(t *testing.T) { require.True(t, db.casState(active, forceCompacting)) // Ingestion should fail with a 503. - req, _ := mockWriteRequest(t, labels.Labels{{Name: labels.MetricName, Value: "test"}}, 0, util.TimeToMillis(time.Now())) + req, _ := mockWriteRequest(t, labels.FromStrings(labels.MetricName, "test"), 0, util.TimeToMillis(time.Now())) ctx := user.InjectOrgID(context.Background(), userID) _, err = i.Push(ctx, req) require.Equal(t, httpgrpc.Errorf(http.StatusServiceUnavailable, "%s", wrapWithUser(errors.New("forced compaction in progress"), userID).Error()), err) @@ -5695,12 +5694,12 @@ func TestIngesterNoFlushWithInFlightRequest(t *testing.T) { }) // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) // Push few samples. - for j := 0; j < 5; j++ { + for range 5 { pushSingleSampleWithMetadata(t, i) } @@ -5727,7 +5726,7 @@ func TestIngesterNoFlushWithInFlightRequest(t *testing.T) { db.releaseAppendLock() // Let's wait until all head series have been flushed. 
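Loops like the tenant fan-out above can now capture `j` in a goroutine without copying it first: since Go 1.22 every iteration gets a fresh loop variable, which is also why the `testData := testData` lines are deleted later in this diff. A sketch of the semantics:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for j := range 3 {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Go 1.22+: j is per-iteration, so this prints user0, user1,
			// user2 in some order. Pre-1.22 this needed `j := j` first.
			fmt.Printf("user%v\n", j)
		}()
	}
	wg.Wait()
}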
- test.Poll(t, 5*time.Second, uint64(0), func() interface{} { + test.Poll(t, 5*time.Second, uint64(0), func() any { db, err := i.getTSDB(userID) if err != nil || db == nil { return false @@ -5747,7 +5746,7 @@ func TestIngester_PushInstanceLimits(t *testing.T) { limits InstanceLimits reqs map[string][]*cortexpb.WriteRequest expectedErr error - expectedErrType interface{} + expectedErrType any }{ "should succeed creating one user and series": { limits: InstanceLimits{MaxInMemorySeries: 1, MaxInMemoryTenants: 1}, @@ -5856,7 +5855,7 @@ func TestIngester_PushInstanceLimits(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until the ingester is ACTIVE - test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} { + test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -5966,16 +5965,16 @@ func TestExpendedPostingsCacheIsolation(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until the ingester is ACTIVE - test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} { + test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) numberOfTenants := 100 wg := sync.WaitGroup{} - for k := 0; k < 10; k++ { + for k := range 10 { wg.Add(numberOfTenants) - for j := 0; j < numberOfTenants; j++ { + for j := range numberOfTenants { go func() { defer wg.Done() userId := fmt.Sprintf("user%v", j) @@ -5989,7 +5988,7 @@ func TestExpendedPostingsCacheIsolation(t *testing.T) { } wg.Add(numberOfTenants) - for j := 0; j < numberOfTenants; j++ { + for j := range numberOfTenants { go func() { defer wg.Done() userId := fmt.Sprintf("user%v", j) @@ -6027,7 +6026,7 @@ func TestExpendedPostingsCacheMatchers(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck // Wait until the ingester is ACTIVE - test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} { + test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() any { return ing.lifecycler.GetState() }) @@ -6036,9 +6035,9 @@ func TestExpendedPostingsCacheMatchers(t *testing.T) { timeStamp := int64(60 * 1000) seriesCreated := map[string]labels.Labels{} - for i := 0; i < numberOfMetricNames; i++ { + for i := range numberOfMetricNames { metricName := fmt.Sprintf("metric_%v", i) - for j := 0; j < seriesPerMetricsNames; j++ { + for j := range seriesPerMetricsNames { s := labels.FromStrings(labels.MetricName, metricName, "labelA", fmt.Sprintf("series_%v", j)) _, err = ing.Push(ctx, cortexpb.ToWriteRequest([]labels.Labels{s}, []cortexpb.Sample{{Value: 2, TimestampMs: timeStamp}}, nil, nil, cortexpb.API)) seriesCreated[s.String()] = s @@ -6061,7 +6060,7 @@ func TestExpendedPostingsCacheMatchers(t *testing.T) { Value: "metric_0", } - for i := 0; i < 4; i++ { + for i := range 4 { tc := testCase{ matchers: []*client.LabelMatcher{nameMatcher}, } @@ -6192,7 +6191,7 @@ func TestExpendedPostingsCacheMatchers(t *testing.T) { db.postingCache.Clear() // lets run 2 times to hit the cache - for i := 0; i < 2; i++ { + for range 2 { verify(t, tc, r.startTs, r.endTs, r.hasSamples) } @@ -6318,7 +6317,7 @@ func TestExpendedPostingsCache(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until the ingester is ACTIVE - test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} { + test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ 
-6328,7 +6327,7 @@ func TestExpendedPostingsCache(t *testing.T) { totalSamples := 4 * 60 var samples = make([]cortexpb.Sample, 0, totalSamples) - for i := 0; i < totalSamples; i++ { + for i := range totalSamples { samples = append(samples, cortexpb.Sample{ Value: float64(i), TimestampMs: int64(i * 60 * 1000), @@ -6336,7 +6335,7 @@ func TestExpendedPostingsCache(t *testing.T) { } lbls := make([]labels.Labels, 0, len(samples)) - for j := 0; j < 10; j++ { + for j := range 10 { for i := 0; i < len(samples); i++ { lbls = append(lbls, labels.FromStrings(labels.MetricName, metricNames[i%len(metricNames)], "a", fmt.Sprintf("aaa%v", j))) } @@ -6494,7 +6493,7 @@ func TestExpendedPostingsCache(t *testing.T) { require.Equal(t, int64(0), postingsForMatchersCalls.Load()) if c.shouldExpireDueInactivity { - test.Poll(t, c.cacheConfig.Blocks.Ttl+c.cacheConfig.Head.Ttl+cfg.BlocksStorageConfig.TSDB.ExpandedCachingExpireInterval, 0, func() interface{} { + test.Poll(t, c.cacheConfig.Blocks.Ttl+c.cacheConfig.Head.Ttl+cfg.BlocksStorageConfig.TSDB.ExpandedCachingExpireInterval, 0, func() any { size := 0 for _, userID := range i.getTSDBUsers() { userDB, _ := i.getTSDB(userID) @@ -6522,7 +6521,7 @@ func TestIngester_inflightPushRequests(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until the ingester is ACTIVE - test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() interface{} { + test.Poll(t, 100*time.Millisecond, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -6599,7 +6598,7 @@ func Test_Ingester_QueryExemplar_MaxInflightQueryRequest(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -6608,7 +6607,7 @@ func Test_Ingester_QueryExemplar_MaxInflightQueryRequest(t *testing.T) { // Mock request ctx := user.InjectOrgID(context.Background(), "test") - wreq, _ := mockWriteRequest(t, labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "200"}}, 1, 100000) + wreq, _ := mockWriteRequest(t, labels.FromStrings("__name__", "test_1", "route", "get_user", "status", "200"), 1, 100000) _, err = i.Push(ctx, wreq) require.NoError(t, err) @@ -6622,7 +6621,7 @@ func generateSamplesForLabel(l labels.Labels, count int, sampleIntervalInMs int) var lbls = make([]labels.Labels, 0, count) var samples = make([]cortexpb.Sample, 0, count) - for i := 0; i < count; i++ { + for i := range count { samples = append(samples, cortexpb.Sample{ Value: float64(i), TimestampMs: int64(i * sampleIntervalInMs), @@ -6728,7 +6727,7 @@ func Test_Ingester_ModeHandler(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -6737,7 +6736,7 @@ func Test_Ingester_ModeHandler(t *testing.T) { require.NoError(t, err) // Wait until initial state - test.Poll(t, 1*time.Second, testData.initialState, func() interface{} { + test.Poll(t, 1*time.Second, testData.initialState, func() any { return i.lifecycler.GetState() }) } @@ -6753,7 +6752,7 @@ func Test_Ingester_ModeHandler(t *testing.T) { require.Equal(t, testData.expectedState, i.lifecycler.GetState()) if testData.expectedIsReady { // 
Wait for instance to own tokens - test.Poll(t, 1*time.Second, nil, func() interface{} { + test.Poll(t, 1*time.Second, nil, func() any { return i.CheckReady(context.Background()) }) require.NoError(t, i.CheckReady(context.Background())) @@ -6988,7 +6987,7 @@ func TestIngester_UpdateLabelSetMetrics(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) // Add user ID. @@ -7050,7 +7049,7 @@ func TestIngesterPanicHandling(t *testing.T) { defer services.StopAndAwaitTerminated(ctx, i) //nolint:errcheck // Wait until it's ACTIVE - test.Poll(t, 1*time.Second, ring.ACTIVE, func() interface{} { + test.Poll(t, 1*time.Second, ring.ACTIVE, func() any { return i.lifecycler.GetState() }) @@ -7149,7 +7148,7 @@ func CreateBlock(t *testing.T, ctx context.Context, dir string, mint, maxt int64 var ref storage.SeriesRef start := (maxt-mint)/2 + mint - _, err = app.Append(ref, labels.Labels{labels.Label{Name: "test_label", Value: "test_value"}}, start, float64(1)) + _, err = app.Append(ref, labels.FromStrings("test_label", "test_value"), start, float64(1)) require.NoError(t, err) err = app.Commit() require.NoError(t, err) diff --git a/pkg/ingester/instance_limits.go b/pkg/ingester/instance_limits.go index cb48df3687..cea165dd2f 100644 --- a/pkg/ingester/instance_limits.go +++ b/pkg/ingester/instance_limits.go @@ -38,7 +38,7 @@ func (cfg *InstanceLimits) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix strin } // UnmarshalYAML implements the yaml.Unmarshaler interface. If give -func (l *InstanceLimits) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (l *InstanceLimits) UnmarshalYAML(unmarshal func(any) error) error { if defaultInstanceLimits != nil { *l = *defaultInstanceLimits } diff --git a/pkg/ingester/lifecycle_test.go b/pkg/ingester/lifecycle_test.go index 4fab7d716e..a0fd0ad868 100644 --- a/pkg/ingester/lifecycle_test.go +++ b/pkg/ingester/lifecycle_test.go @@ -73,7 +73,7 @@ func TestIngesterRestart(t *testing.T) { require.NoError(t, services.StopAndAwaitTerminated(context.Background(), ingester)) } - test.Poll(t, 100*time.Millisecond, 1, func() interface{} { + test.Poll(t, 100*time.Millisecond, 1, func() any { return numTokens(config.LifecyclerConfig.RingConfig.KVStore.Mock, "localhost", RingKey) }) @@ -88,7 +88,7 @@ func TestIngesterRestart(t *testing.T) { time.Sleep(200 * time.Millisecond) - test.Poll(t, 100*time.Millisecond, 1, func() interface{} { + test.Poll(t, 100*time.Millisecond, 1, func() any { return numTokens(config.LifecyclerConfig.RingConfig.KVStore.Mock, "localhost", RingKey) }) } @@ -104,7 +104,7 @@ func TestIngester_ShutdownHandler(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), ingester)) // Make sure the ingester has been added to the ring. - test.Poll(t, 100*time.Millisecond, 1, func() interface{} { + test.Poll(t, 100*time.Millisecond, 1, func() any { return numTokens(config.LifecyclerConfig.RingConfig.KVStore.Mock, "localhost", RingKey) }) @@ -113,7 +113,7 @@ func TestIngester_ShutdownHandler(t *testing.T) { require.Equal(t, http.StatusNoContent, recorder.Result().StatusCode) // Make sure the ingester has been removed from the ring even when UnregisterFromRing is false. 
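`InstanceLimits.UnmarshalYAML` keeps the same defaulting behaviour under the `any`-typed callback: copy the process-wide defaults into the struct first, then let YAML overwrite only the fields the document actually sets. A sketch of the pattern with hypothetical fields (`MaxSeries`, `MaxTenants`); the `type plain` alias is the usual trick for avoiding recursion into the custom unmarshaler and is an assumption about the elided body, not a quotation of it:

package ingester

type instanceLimits struct {
	MaxSeries  int `yaml:"max_series"`
	MaxTenants int `yaml:"max_tenants"`
}

var defaultLimits = &instanceLimits{MaxSeries: 1_000_000, MaxTenants: 100}

func (l *instanceLimits) UnmarshalYAML(unmarshal func(any) error) error {
	if defaultLimits != nil {
		*l = *defaultLimits // start from defaults, not the zero value
	}
	type plain instanceLimits // method-free alias: plain decode, no recursion
	return unmarshal((*plain)(l))
}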
- test.Poll(t, 100*time.Millisecond, 0, func() interface{} { + test.Poll(t, 100*time.Millisecond, 0, func() any { return numTokens(config.LifecyclerConfig.RingConfig.KVStore.Mock, "localhost", RingKey) }) }) diff --git a/pkg/ingester/limiter.go b/pkg/ingester/limiter.go index bfeec07e14..69920f7622 100644 --- a/pkg/ingester/limiter.go +++ b/pkg/ingester/limiter.go @@ -21,9 +21,9 @@ var ( type errMaxSeriesPerLabelSetLimitExceeded struct { error - id string - localLimit int - globalLimit int + id string + actualLocalLimit int + globalLimit int } // RingCount is the interface exposed by a ring implementation which allows @@ -130,9 +130,9 @@ func (l *Limiter) AssertMaxSeriesPerLabelSet(userID string, metric labels.Labels return err } else if u >= local { return errMaxSeriesPerLabelSetLimitExceeded{ - id: limit.Id, - localLimit: local, - globalLimit: limit.Limits.MaxSeries, + id: limit.Id, + actualLocalLimit: local, + globalLimit: limit.Limits.MaxSeries, } } } @@ -208,8 +208,8 @@ func (l *Limiter) formatMaxMetadataPerMetricError(userID string, metric string) } func (l *Limiter) formatMaxSeriesPerLabelSetError(err errMaxSeriesPerLabelSetLimitExceeded) error { - return fmt.Errorf("per-labelset series limit of %d exceeded (labelSet: %s, local limit: %d global limit: %d actual)", - minNonZero(err.globalLimit, err.localLimit), err.id, err.localLimit, err.globalLimit) + return fmt.Errorf("per-labelset series limit of %d exceeded (labelSet: %s, global limit: %d actual local limit: %d)", + minNonZero(err.globalLimit, err.actualLocalLimit), err.id, err.globalLimit, err.actualLocalLimit) } func (l *Limiter) limitsPerLabelSets(userID string, metric labels.Labels) []validation.LimitsPerLabelSet { diff --git a/pkg/ingester/limiter_test.go b/pkg/ingester/limiter_test.go index 82a53f9dbd..17d723c21f 100644 --- a/pkg/ingester/limiter_test.go +++ b/pkg/ingester/limiter_test.go @@ -221,7 +221,6 @@ func runLimiterMaxFunctionTest( } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { // Mock the ring @@ -288,7 +287,6 @@ func TestLimiter_AssertMaxSeriesPerMetric(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { // Mock the ring @@ -349,7 +347,6 @@ func TestLimiter_AssertMaxMetadataPerMetric(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { // Mock the ring @@ -411,7 +408,6 @@ func TestLimiter_AssertMaxSeriesPerUser(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { // Mock the ring @@ -473,7 +469,6 @@ func TestLimiter_AssertMaxNativeHistogramsSeriesPerUser(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { // Mock the ring @@ -526,7 +521,7 @@ func TestLimiter_AssertMaxSeriesPerLabelSet(t *testing.T) { ringIngesterCount: 10, series: 200, shardByAllLabels: true, - expected: errMaxSeriesPerLabelSetLimitExceeded{globalLimit: 10, localLimit: 3}, + expected: errMaxSeriesPerLabelSetLimitExceeded{globalLimit: 10, actualLocalLimit: 3}, limits: validation.Limits{ LimitsPerLabelSet: []validation.LimitsPerLabelSet{ { @@ -557,7 +552,6 @@ func TestLimiter_AssertMaxSeriesPerLabelSet(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { // Mock the ring @@ -618,7 +612,6 @@ func TestLimiter_AssertMaxMetricsWithMetadataPerUser(t *testing.T) { } for testName, 
testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { // Mock the ring @@ -714,7 +707,6 @@ func TestLimiter_minNonZero(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { assert.Equal(t, testData.expected, minNonZero(testData.first, testData.second)) diff --git a/pkg/ingester/metrics.go b/pkg/ingester/metrics.go index fc05b9764b..8160216f2a 100644 --- a/pkg/ingester/metrics.go +++ b/pkg/ingester/metrics.go @@ -20,6 +20,13 @@ const ( const ( sampleMetricTypeFloat = "float" sampleMetricTypeHistogram = "histogram" + + typeSeries = "series" + typeSamples = "samples" + typeExemplars = "exemplars" + typeHistograms = "histograms" + typeMetadata = "metadata" + typeTombstones = "tombstones" ) type ingesterMetrics struct { @@ -330,6 +337,8 @@ type tsdbMetrics struct { tsdbWALTruncateTotal *prometheus.Desc tsdbWALTruncateDuration *prometheus.Desc tsdbWALCorruptionsTotal *prometheus.Desc + tsdbWALReplayUnknownRefsTotal *prometheus.Desc + tsdbWBLReplayUnknownRefsTotal *prometheus.Desc tsdbWALWritesFailed *prometheus.Desc tsdbHeadTruncateFail *prometheus.Desc tsdbHeadTruncateTotal *prometheus.Desc @@ -437,6 +446,14 @@ func newTSDBMetrics(r prometheus.Registerer) *tsdbMetrics { "cortex_ingester_tsdb_wal_corruptions_total", "Total number of TSDB WAL corruptions.", nil, nil), + tsdbWALReplayUnknownRefsTotal: prometheus.NewDesc( + "cortex_ingester_tsdb_wal_replay_unknown_refs_total", + "Total number of unknown series references encountered during TSDB WAL replay.", + []string{"type"}, nil), + tsdbWBLReplayUnknownRefsTotal: prometheus.NewDesc( + "cortex_ingester_tsdb_wbl_replay_unknown_refs_total", + "Total number of unknown series references encountered during TSDB WBL replay.", + []string{"type"}, nil), tsdbWALWritesFailed: prometheus.NewDesc( "cortex_ingester_tsdb_wal_writes_failed_total", "Total number of TSDB WAL writes that failed.", @@ -601,6 +618,8 @@ func (sm *tsdbMetrics) Describe(out chan<- *prometheus.Desc) { out <- sm.tsdbWALTruncateTotal out <- sm.tsdbWALTruncateDuration out <- sm.tsdbWALCorruptionsTotal + out <- sm.tsdbWALReplayUnknownRefsTotal + out <- sm.tsdbWBLReplayUnknownRefsTotal out <- sm.tsdbWALWritesFailed out <- sm.tsdbHeadTruncateFail out <- sm.tsdbHeadTruncateTotal @@ -659,6 +678,8 @@ func (sm *tsdbMetrics) Collect(out chan<- prometheus.Metric) { data.SendSumOfCounters(out, sm.tsdbWALTruncateTotal, "prometheus_tsdb_wal_truncations_total") data.SendSumOfSummaries(out, sm.tsdbWALTruncateDuration, "prometheus_tsdb_wal_truncate_duration_seconds") data.SendSumOfCounters(out, sm.tsdbWALCorruptionsTotal, "prometheus_tsdb_wal_corruptions_total") + data.SendSumOfCountersWithLabels(out, sm.tsdbWALReplayUnknownRefsTotal, "prometheus_tsdb_wal_replay_unknown_refs_total", "type") + data.SendSumOfCountersWithLabels(out, sm.tsdbWBLReplayUnknownRefsTotal, "prometheus_tsdb_wbl_replay_unknown_refs_total", "type") data.SendSumOfCounters(out, sm.tsdbWALWritesFailed, "prometheus_tsdb_wal_writes_failed_total") data.SendSumOfCounters(out, sm.tsdbHeadTruncateFail, "prometheus_tsdb_head_truncations_failed_total") data.SendSumOfCounters(out, sm.tsdbHeadTruncateTotal, "prometheus_tsdb_head_truncations_total") diff --git a/pkg/ingester/metrics_test.go b/pkg/ingester/metrics_test.go index b08b0ca814..9c7d316b96 100644 --- a/pkg/ingester/metrics_test.go +++ b/pkg/ingester/metrics_test.go @@ -240,6 +240,18 @@ func TestTSDBMetrics(t *testing.T) { # TYPE cortex_ingester_tsdb_wal_corruptions_total counter 
cortex_ingester_tsdb_wal_corruptions_total 2.676537e+06 + # HELP cortex_ingester_tsdb_wal_replay_unknown_refs_total Total number of unknown series references encountered during TSDB WAL replay. + # TYPE cortex_ingester_tsdb_wal_replay_unknown_refs_total counter + cortex_ingester_tsdb_wal_replay_unknown_refs_total{type="series"} 300 + cortex_ingester_tsdb_wal_replay_unknown_refs_total{type="samples"} 303 + cortex_ingester_tsdb_wal_replay_unknown_refs_total{type="metadata"} 306 + + # HELP cortex_ingester_tsdb_wbl_replay_unknown_refs_total Total number of unknown series references encountered during TSDB WBL replay. + # TYPE cortex_ingester_tsdb_wbl_replay_unknown_refs_total counter + cortex_ingester_tsdb_wbl_replay_unknown_refs_total{type="exemplars"} 300 + cortex_ingester_tsdb_wbl_replay_unknown_refs_total{type="histograms"} 303 + cortex_ingester_tsdb_wbl_replay_unknown_refs_total{type="tombstones"} 306 + # HELP cortex_ingester_tsdb_wal_writes_failed_total Total number of TSDB WAL writes that failed. # TYPE cortex_ingester_tsdb_wal_writes_failed_total counter cortex_ingester_tsdb_wal_writes_failed_total 1486965 @@ -505,6 +517,18 @@ func TestTSDBMetricsWithRemoval(t *testing.T) { # TYPE cortex_ingester_tsdb_wal_corruptions_total counter cortex_ingester_tsdb_wal_corruptions_total 2.676537e+06 + # HELP cortex_ingester_tsdb_wal_replay_unknown_refs_total Total number of unknown series references encountered during TSDB WAL replay. + # TYPE cortex_ingester_tsdb_wal_replay_unknown_refs_total counter + cortex_ingester_tsdb_wal_replay_unknown_refs_total{type="series"} 300 + cortex_ingester_tsdb_wal_replay_unknown_refs_total{type="samples"} 303 + cortex_ingester_tsdb_wal_replay_unknown_refs_total{type="metadata"} 306 + + # HELP cortex_ingester_tsdb_wbl_replay_unknown_refs_total Total number of unknown series references encountered during TSDB WBL replay. + # TYPE cortex_ingester_tsdb_wbl_replay_unknown_refs_total counter + cortex_ingester_tsdb_wbl_replay_unknown_refs_total{type="exemplars"} 300 + cortex_ingester_tsdb_wbl_replay_unknown_refs_total{type="histograms"} 303 + cortex_ingester_tsdb_wbl_replay_unknown_refs_total{type="tombstones"} 306 + # HELP cortex_ingester_tsdb_wal_writes_failed_total Total number of TSDB WAL writes that failed. 
# TYPE cortex_ingester_tsdb_wal_writes_failed_total counter cortex_ingester_tsdb_wal_writes_failed_total 1486965 @@ -883,6 +907,22 @@ func populateTSDBMetrics(base float64) *prometheus.Registry { }) snapshotReplayErrorTotal.Add(103) + walReplayUnknownRefsTotal := promauto.With(r).NewCounterVec(prometheus.CounterOpts{ + Name: "prometheus_tsdb_wal_replay_unknown_refs_total", + Help: "Total number of unknown series references encountered during WAL replay.", + }, []string{"type"}) + walReplayUnknownRefsTotal.WithLabelValues(typeSeries).Add(100) + walReplayUnknownRefsTotal.WithLabelValues(typeSamples).Add(101) + walReplayUnknownRefsTotal.WithLabelValues(typeMetadata).Add(102) + + wblReplayUnknownRefsTotal := promauto.With(r).NewCounterVec(prometheus.CounterOpts{ + Name: "prometheus_tsdb_wbl_replay_unknown_refs_total", + Help: "Total number of unknown series references encountered during WBL replay.", + }, []string{"type"}) + wblReplayUnknownRefsTotal.WithLabelValues(typeExemplars).Add(100) + wblReplayUnknownRefsTotal.WithLabelValues(typeHistograms).Add(101) + wblReplayUnknownRefsTotal.WithLabelValues(typeTombstones).Add(102) + oooHistogram := promauto.With(r).NewHistogram(prometheus.HistogramOpts{ Name: "prometheus_tsdb_sample_ooo_delta", Help: "Delta in seconds by which a sample is considered out of order (reported regardless of OOO time window and whether sample is accepted or not).", diff --git a/pkg/ingester/user_state.go b/pkg/ingester/user_state.go index 062f4d5e1b..2918c8993a 100644 --- a/pkg/ingester/user_state.go +++ b/pkg/ingester/user_state.go @@ -38,7 +38,7 @@ type metricCounter struct { func newMetricCounter(limiter *Limiter, ignoredMetricsForSeriesCount map[string]struct{}) *metricCounter { shards := make([]metricCounterShard, 0, numMetricCounterShards) - for i := 0; i < numMetricCounterShards; i++ { + for range numMetricCounterShards { shards = append(shards, metricCounterShard{ m: map[string]int{}, }) @@ -103,7 +103,7 @@ type labelSetCounter struct { func newLabelSetCounter(limiter *Limiter) *labelSetCounter { shards := make([]*labelSetCounterShard, 0, numMetricCounterShards) - for i := 0; i < numMetricCounterShards; i++ { + for range numMetricCounterShards { shards = append(shards, &labelSetCounterShard{ RWMutex: &sync.RWMutex{}, valuesCounter: map[uint64]*labelSetCounterEntry{}, @@ -191,9 +191,9 @@ func getCardinalityForLimitsPerLabelSet(ctx context.Context, numSeries uint64, i } func getPostingForLabels(ctx context.Context, ir tsdb.IndexReader, lbls labels.Labels) (index.Postings, error) { - postings := make([]index.Postings, 0, len(lbls)) - for _, lbl := range lbls { - p, err := ir.Postings(ctx, lbl.Name, lbl.Value) + postings := make([]index.Postings, 0, lbls.Len()) + for name, value := range lbls.Map() { + p, err := ir.Postings(ctx, name, value) if err != nil { return nil, err } @@ -252,7 +252,7 @@ func (m *labelSetCounter) UpdateMetric(ctx context.Context, u *userTSDB, metrics } nonDefaultPartitionChanged := false - for i := 0; i < numMetricCounterShards; i++ { + for i := range numMetricCounterShards { s := m.shards[i] s.RLock() for h, entry := range s.valuesCounter { diff --git a/pkg/ingester/user_state_test.go b/pkg/ingester/user_state_test.go index a75b7e3e3e..38be322854 100644 --- a/pkg/ingester/user_state_test.go +++ b/pkg/ingester/user_state_test.go @@ -343,11 +343,11 @@ func (ir *mockIndexReader) Postings(ctx context.Context, name string, values ... 
func (ir *mockIndexReader) Symbols() index.StringIter { return nil } -func (ir *mockIndexReader) SortedLabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { +func (ir *mockIndexReader) SortedLabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) { return nil, nil } -func (ir *mockIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { +func (ir *mockIndexReader) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) { return nil, nil } diff --git a/pkg/parquetconverter/converter.go b/pkg/parquetconverter/converter.go index 4eca20ac0a..477149705c 100644 --- a/pkg/parquetconverter/converter.go +++ b/pkg/parquetconverter/converter.go @@ -104,11 +104,11 @@ type Converter struct { func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.Ring.RegisterFlags(f) - f.StringVar(&cfg.DataDir, "parquet-converter.data-dir", "./data", "Data directory in which to cache blocks and process conversions.") - f.IntVar(&cfg.MetaSyncConcurrency, "parquet-converter.meta-sync-concurrency", 20, "Number of Go routines to use when syncing block meta files from the long term storage.") - f.IntVar(&cfg.MaxRowsPerRowGroup, "parquet-converter.max-rows-per-row-group", 1e6, "Max number of rows per parquet row group.") - f.DurationVar(&cfg.ConversionInterval, "parquet-converter.conversion-interval", time.Minute, "The frequency at which the conversion job runs.") - f.BoolVar(&cfg.FileBufferEnabled, "parquet-converter.file-buffer-enabled", true, "Whether to enable buffering the writes in disk to reduce memory utilization.") + f.StringVar(&cfg.DataDir, "parquet-converter.data-dir", "./data", "Local directory path for caching TSDB blocks during parquet conversion.") + f.IntVar(&cfg.MetaSyncConcurrency, "parquet-converter.meta-sync-concurrency", 20, "Maximum concurrent goroutines for downloading block metadata from object storage.") + f.IntVar(&cfg.MaxRowsPerRowGroup, "parquet-converter.max-rows-per-row-group", 1e6, "Maximum number of time series per parquet row group. Larger values improve compression but may reduce performance during reads.") + f.DurationVar(&cfg.ConversionInterval, "parquet-converter.conversion-interval", time.Minute, "How often to check for new TSDB blocks to convert to parquet format.") + f.BoolVar(&cfg.FileBufferEnabled, "parquet-converter.file-buffer-enabled", true, "Enable disk-based write buffering to reduce memory consumption during parquet file generation.") } func NewConverter(cfg Config, storageCfg cortex_tsdb.BlocksStorageConfig, blockRanges []int64, logger log.Logger, registerer prometheus.Registerer, limits *validation.Overrides) (*Converter, error) { @@ -139,7 +139,6 @@ func newConverter(cfg Config, bkt objstore.InstrumentedBucket, storageCfg cortex metrics: newMetrics(registerer), bkt: bkt, baseConverterOptions: []convert.ConvertOption{ - convert.WithSortBy(labels.MetricName), convert.WithColDuration(time.Hour * 8), convert.WithRowGroupSize(cfg.MaxRowsPerRowGroup), }, @@ -430,6 +429,11 @@ func (c *Converter) convertUser(ctx context.Context, logger log.Logger, ring rin converterOpts := append(c.baseConverterOptions, convert.WithName(b.ULID.String())) + sortColumns := []string{labels.MetricName} + userConfiguredSortColumns := c.limits.ParquetConverterSortColumns(userID) + sortColumns = append(sortColumns, userConfiguredSortColumns...) 
+ converterOpts = append(converterOpts, convert.WithSortBy(sortColumns...)) + if c.cfg.FileBufferEnabled { converterOpts = append(converterOpts, convert.WithColumnPageBuffers(parquet.NewFileBufferPool(bdir, "buffers.*"))) } diff --git a/pkg/parquetconverter/converter_test.go b/pkg/parquetconverter/converter_test.go index fc8f6e9980..81caa86c63 100644 --- a/pkg/parquetconverter/converter_test.go +++ b/pkg/parquetconverter/converter_test.go @@ -59,14 +59,23 @@ func TestConverter(t *testing.T) { flagext.DefaultValues(limits) limits.ParquetConverterEnabled = true - c, logger, _ := prepare(t, cfg, objstore.WithNoopInstr(bucketClient), limits) + userSpecificSortColumns := []string{"cluster", "namespace"} + + // Create a mock tenant limits implementation + tenantLimits := &mockTenantLimits{ + limits: map[string]*validation.Limits{ + user: { + ParquetConverterSortColumns: userSpecificSortColumns, + ParquetConverterEnabled: true, + }, + }, + } + + c, logger, _ := prepare(t, cfg, objstore.WithNoopInstr(bucketClient), limits, tenantLimits) ctx := context.Background() - lbls := labels.Labels{labels.Label{ - Name: "__name__", - Value: "test", - }} + lbls := labels.FromStrings("__name__", "test") blocks := []ulid.ULID{} // Create blocks @@ -92,7 +101,7 @@ func TestConverter(t *testing.T) { blocksConverted := []ulid.ULID{} - test.Poll(t, 3*time.Minute, 1, func() interface{} { + test.Poll(t, 3*time.Minute, 1, func() any { blocksConverted = blocksConverted[:0] for _, bIds := range blocks { m, err := parquet.ReadConverterMark(ctx, bIds, userBucket, logger) @@ -131,12 +140,12 @@ func TestConverter(t *testing.T) { require.NoError(t, cortex_tsdb.WriteTenantDeletionMark(context.Background(), objstore.WithNoopInstr(bucketClient), user, cortex_tsdb.NewTenantDeletionMark(time.Now()))) // Should clean sync folders - test.Poll(t, time.Minute, 0, func() interface{} { + test.Poll(t, time.Minute, 0, func() any { return len(c.listTenantsWithMetaSyncDirectories()) }) // Verify metrics after user deletion - test.Poll(t, time.Minute*10, true, func() interface{} { + test.Poll(t, time.Minute*10, true, func() any { if testutil.ToFloat64(c.metrics.convertedBlocks.WithLabelValues(user)) != 0.0 { return false } @@ -160,7 +169,7 @@ func prepareConfig() Config { return cfg } -func prepare(t *testing.T, cfg Config, bucketClient objstore.InstrumentedBucket, limits *validation.Limits) (*Converter, log.Logger, prometheus.Gatherer) { +func prepare(t *testing.T, cfg Config, bucketClient objstore.InstrumentedBucket, limits *validation.Limits, tenantLimits validation.TenantLimits) (*Converter, log.Logger, prometheus.Gatherer) { storageCfg := cortex_tsdb.BlocksStorageConfig{} blockRanges := cortex_tsdb.DurationList{2 * time.Hour, 12 * time.Hour, 24 * time.Hour} flagext.DefaultValues(&storageCfg) @@ -179,7 +188,7 @@ func prepare(t *testing.T, cfg Config, bucketClient objstore.InstrumentedBucket, flagext.DefaultValues(limits) } - overrides := validation.NewOverrides(*limits, nil) + overrides := validation.NewOverrides(*limits, tenantLimits) scanner, err := users.NewScanner(cortex_tsdb.UsersScannerConfig{ Strategy: cortex_tsdb.UserScanStrategyList, @@ -254,10 +263,7 @@ func TestConverter_BlockConversionFailure(t *testing.T) { require.NoError(t, err) // Create test labels - lbls := labels.Labels{labels.Label{ - Name: "__name__", - Value: "test", - }} + lbls := labels.FromStrings("__name__", "test") // Create a real TSDB block dir := t.TempDir() @@ -312,10 +318,7 @@ func TestConverter_ShouldNotFailOnAccessDenyError(t *testing.T) { 
require.NoError(t, err) // Create test labels - lbls := labels.Labels{labels.Label{ - Name: "__name__", - Value: "test", - }} + lbls := labels.FromStrings("__name__", "test") // Create a real TSDB block dir := t.TempDir() @@ -366,11 +369,11 @@ type mockBucket struct { getFailure error } -func (m *mockBucket) Upload(ctx context.Context, name string, r io.Reader) error { +func (m *mockBucket) Upload(ctx context.Context, name string, r io.Reader, opts ...objstore.ObjectUploadOption) error { if m.uploadFailure != nil { return m.uploadFailure } - return m.Bucket.Upload(ctx, name, r) + return m.Bucket.Upload(ctx, name, r, opts...) } func (m *mockBucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { @@ -393,3 +396,19 @@ func (r *RingMock) Get(key uint32, op ring.Operation, bufDescs []ring.InstanceDe }, }, nil } + +// mockTenantLimits implements the validation.TenantLimits interface for testing +type mockTenantLimits struct { + limits map[string]*validation.Limits +} + +func (m *mockTenantLimits) ByUserID(userID string) *validation.Limits { + if limits, ok := m.limits[userID]; ok { + return limits + } + return nil +} + +func (m *mockTenantLimits) AllByUserID() map[string]*validation.Limits { + return m.limits +} diff --git a/pkg/parquetconverter/metrics.go b/pkg/parquetconverter/metrics.go index 57ff4c065e..2b3e80b0cf 100644 --- a/pkg/parquetconverter/metrics.go +++ b/pkg/parquetconverter/metrics.go @@ -30,7 +30,7 @@ func newMetrics(reg prometheus.Registerer) *metrics { convertParquetBlockDelay: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ Name: "cortex_parquet_converter_convert_block_delay_minutes", Help: "Delay in minutes of Parquet block to be converted from the TSDB block being uploaded to object store", - Buckets: []float64{5, 10, 15, 20, 30, 45, 60, 80, 100, 120}, + Buckets: []float64{5, 10, 15, 20, 30, 45, 60, 80, 100, 120, 150, 180, 210, 240, 270, 300}, }), ownedUsers: promauto.With(reg).NewGauge(prometheus.GaugeOpts{ Name: "cortex_parquet_converter_users_owned", diff --git a/pkg/querier/batch/batch.go b/pkg/querier/batch/batch.go index 79dfe8081e..af645d14dc 100644 --- a/pkg/querier/batch/batch.go +++ b/pkg/querier/batch/batch.go @@ -55,7 +55,6 @@ type iterator interface { func NewChunkMergeIterator(it chunkenc.Iterator, chunks []chunk.Chunk, _, _ model.Time) chunkenc.Iterator { converted := make([]GenericChunk, len(chunks)) for i, c := range chunks { - c := c converted[i] = NewGenericChunk(int64(c.From), int64(c.Through), c.NewIterator) } @@ -141,10 +140,7 @@ func (a *iteratorAdapter) Next() chunkenc.ValueType { a.curr.Index++ for a.curr.Index >= a.curr.Length && a.underlying.Next(a.batchSize) != chunkenc.ValNone { a.curr = a.underlying.Batch() - a.batchSize = a.batchSize * 2 - if a.batchSize > chunk.BatchSize { - a.batchSize = chunk.BatchSize - } + a.batchSize = min(a.batchSize*2, chunk.BatchSize) } if a.curr.Index < a.curr.Length { return a.curr.ValType diff --git a/pkg/querier/batch/batch_test.go b/pkg/querier/batch/batch_test.go index 4f4b57bfe4..d90a0e1033 100644 --- a/pkg/querier/batch/batch_test.go +++ b/pkg/querier/batch/batch_test.go @@ -51,12 +51,11 @@ func BenchmarkNewChunkMergeIterator_CreateAndIterate(b *testing.B) { chunks := createChunks(b, step, scenario.numChunks, scenario.numSamplesPerChunk, scenario.duplicationFactor, scenario.enc) - b.ResetTimer() b.Run(name, func(b *testing.B) { b.ReportAllocs() var it chunkenc.Iterator - for n := 0; n < b.N; n++ { + for b.Loop() { it = NewChunkMergeIterator(it, chunks, 0, 0) for it.Next() != 
chunkenc.ValNone { it.At() @@ -106,11 +105,10 @@ func BenchmarkNewChunkMergeIterator_Seek(b *testing.B) { chunks := createChunks(b, scenario.scrapeInterval, scenario.numChunks, scenario.numSamplesPerChunk, scenario.duplicationFactor, scenario.enc) - b.ResetTimer() b.Run(name, func(b *testing.B) { b.ReportAllocs() var it chunkenc.Iterator - for n := 0; n < b.N; n++ { + for b.Loop() { it = NewChunkMergeIterator(it, chunks, 0, 0) i := int64(0) for it.Seek(i*scenario.seekStep.Milliseconds()) != chunkenc.ValNone { @@ -164,8 +162,8 @@ func TestSeekCorrectlyDealWithSinglePointChunks(t *testing.T) { func createChunks(b *testing.B, step time.Duration, numChunks, numSamplesPerChunk, duplicationFactor int, enc promchunk.Encoding) []chunk.Chunk { result := make([]chunk.Chunk, 0, numChunks) - for d := 0; d < duplicationFactor; d++ { - for c := 0; c < numChunks; c++ { + for range duplicationFactor { + for c := range numChunks { minTime := step * time.Duration(c*numSamplesPerChunk) result = append(result, util.GenerateChunk(b, step, model.Time(minTime.Milliseconds()), numSamplesPerChunk, enc)) } diff --git a/pkg/querier/batch/chunk_test.go b/pkg/querier/batch/chunk_test.go index becb4e7dff..623de16601 100644 --- a/pkg/querier/batch/chunk_test.go +++ b/pkg/querier/batch/chunk_test.go @@ -39,7 +39,6 @@ func forEncodings(t *testing.T, f func(t *testing.T, enc promchunk.Encoding)) { promchunk.PrometheusHistogramChunk, //promchunk.PrometheusFloatHistogramChunk, } { - enc := enc t.Run(enc.String(), func(t *testing.T) { t.Parallel() f(t, enc) @@ -55,7 +54,7 @@ func mkGenericChunk(t require.TestingT, from model.Time, points int, enc promchu func testIter(t require.TestingT, points int, iter chunkenc.Iterator, enc promchunk.Encoding) { histograms := histogram_util.GenerateTestHistograms(0, 1000, points) ets := model.TimeFromUnix(0) - for i := 0; i < points; i++ { + for i := range points { require.Equal(t, iter.Next(), enc.ChunkValueType(), strconv.Itoa(i)) switch enc { case promchunk.PrometheusXorChunk: @@ -132,7 +131,7 @@ func TestSeek(t *testing.T) { it: &it, } - for i := 0; i < chunk.BatchSize-1; i++ { + for i := range chunk.BatchSize - 1 { require.Equal(t, chunkenc.ValFloat, c.Seek(int64(i), 1)) } require.Equal(t, 1, it.seeks) @@ -159,7 +158,7 @@ func (i *mockIterator) Batch(size int, valType chunkenc.ValueType) chunk.Batch { Length: chunk.BatchSize, ValType: valType, } - for i := 0; i < chunk.BatchSize; i++ { + for i := range chunk.BatchSize { batch.Timestamps[i] = int64(i) } return batch diff --git a/pkg/querier/batch/merge.go b/pkg/querier/batch/merge.go index 27030149d2..33c0f91787 100644 --- a/pkg/querier/batch/merge.go +++ b/pkg/querier/batch/merge.go @@ -70,12 +70,12 @@ func (c *mergeIterator) Reset(size int) *mergeIterator { c.batchesBuf = make(batchStream, len(c.its)) } else { c.batchesBuf = c.batchesBuf[:size] - for i := 0; i < size; i++ { + for i := range size { c.batchesBuf[i] = promchunk.Batch{} } } - for i := 0; i < len(c.nextBatchBuf); i++ { + for i := range len(c.nextBatchBuf) { c.nextBatchBuf[i] = promchunk.Batch{} } @@ -192,11 +192,11 @@ func (h *iteratorHeap) Less(i, j int) bool { return iT < jT } -func (h *iteratorHeap) Push(x interface{}) { +func (h *iteratorHeap) Push(x any) { *h = append(*h, x.(iterator)) } -func (h *iteratorHeap) Pop() interface{} { +func (h *iteratorHeap) Pop() any { old := *h n := len(old) x := old[n-1] diff --git a/pkg/querier/batch/merge_test.go b/pkg/querier/batch/merge_test.go index d835640d70..a7ab54b94b 100644 --- a/pkg/querier/batch/merge_test.go +++ 
b/pkg/querier/batch/merge_test.go @@ -30,16 +30,15 @@ func TestMergeIter(t *testing.T) { func BenchmarkMergeIterator(b *testing.B) { chunks := make([]GenericChunk, 0, 10) - for i := 0; i < 10; i++ { + for i := range 10 { chunks = append(chunks, mkGenericChunk(b, model.Time(i*25), 120, encoding.PrometheusXorChunk)) } iter := newMergeIterator(nil, chunks) for _, r := range []bool{true, false} { b.Run(fmt.Sprintf("reuse-%t", r), func(b *testing.B) { - b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { if r { iter = newMergeIterator(iter, chunks) } else { @@ -64,7 +63,7 @@ func TestMergeHarder(t *testing.T) { offset = 30 samples = 100 ) - for i := 0; i < numChunks; i++ { + for range numChunks { chunks = append(chunks, mkGenericChunk(t, from, samples, enc)) from = from.Add(time.Duration(offset) * time.Second) } diff --git a/pkg/querier/batch/non_overlapping_test.go b/pkg/querier/batch/non_overlapping_test.go index 2377e8c3fa..7fc4408666 100644 --- a/pkg/querier/batch/non_overlapping_test.go +++ b/pkg/querier/batch/non_overlapping_test.go @@ -12,7 +12,7 @@ func TestNonOverlappingIter(t *testing.T) { t.Parallel() forEncodings(t, func(t *testing.T, enc encoding.Encoding) { cs := []GenericChunk(nil) - for i := int64(0); i < 100; i++ { + for i := range int64(100) { cs = append(cs, mkGenericChunk(t, model.TimeFromUnix(i*10), 10, enc)) } testIter(t, 10*100, newIteratorAdapter(newNonOverlappingIterator(cs)), enc) diff --git a/pkg/querier/batch/stream_test.go b/pkg/querier/batch/stream_test.go index 2274cf7aa0..41148e890f 100644 --- a/pkg/querier/batch/stream_test.go +++ b/pkg/querier/batch/stream_test.go @@ -47,7 +47,6 @@ func TestStream(t *testing.T) { output: []promchunk.Batch{mkBatch(0, enc), mkBatch(promchunk.BatchSize, enc), mkBatch(2*promchunk.BatchSize, enc), mkBatch(3*promchunk.BatchSize, enc)}, }, } { - tc := tc t.Run(strconv.Itoa(i), func(t *testing.T) { t.Parallel() result := make(batchStream, len(tc.input1)+len(tc.input2)) @@ -60,7 +59,7 @@ func TestStream(t *testing.T) { func mkBatch(from int64, enc encoding.Encoding) promchunk.Batch { var result promchunk.Batch - for i := int64(0); i < promchunk.BatchSize; i++ { + for i := range int64(promchunk.BatchSize) { result.Timestamps[i] = from + i switch enc { case encoding.PrometheusXorChunk: @@ -91,13 +90,13 @@ func testHistogram(count, numSpans, numBuckets int) *histogram.Histogram { NegativeBuckets: make([]int64, bucketsPerSide), PositiveBuckets: make([]int64, bucketsPerSide), } - for j := 0; j < numSpans; j++ { + for j := range numSpans { s := histogram.Span{Offset: 1, Length: spanLength} h.NegativeSpans[j] = s h.PositiveSpans[j] = s } - for j := 0; j < bucketsPerSide; j++ { + for j := range bucketsPerSide { h.NegativeBuckets[j] = 1 h.PositiveBuckets[j] = 1 } diff --git a/pkg/querier/blocks_consistency_checker_test.go b/pkg/querier/blocks_consistency_checker_test.go index 2b3bce3bcf..5da829dbaf 100644 --- a/pkg/querier/blocks_consistency_checker_test.go +++ b/pkg/querier/blocks_consistency_checker_test.go @@ -103,7 +103,6 @@ func TestBlocksConsistencyChecker_Check(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() diff --git a/pkg/querier/blocks_finder_bucket_index.go b/pkg/querier/blocks_finder_bucket_index.go index 60f05d722e..0f0977bf9c 100644 --- a/pkg/querier/blocks_finder_bucket_index.go +++ b/pkg/querier/blocks_finder_bucket_index.go @@ -8,6 +8,7 @@ import ( "github.com/oklog/ulid/v2" "github.com/pkg/errors" 
"github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/model/labels" "github.com/thanos-io/objstore" "github.com/cortexproject/cortex/pkg/util/validation" @@ -49,7 +50,7 @@ func NewBucketIndexBlocksFinder(cfg BucketIndexBlocksFinderConfig, bkt objstore. } // GetBlocks implements BlocksFinder. -func (f *BucketIndexBlocksFinder) GetBlocks(ctx context.Context, userID string, minT, maxT int64) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) { +func (f *BucketIndexBlocksFinder) GetBlocks(ctx context.Context, userID string, minT, maxT int64, _ []*labels.Matcher) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) { if f.State() != services.Running { return nil, nil, errBucketIndexBlocksFinderNotRunning } diff --git a/pkg/querier/blocks_finder_bucket_index_test.go b/pkg/querier/blocks_finder_bucket_index_test.go index 280939c16c..d5404dbc8b 100644 --- a/pkg/querier/blocks_finder_bucket_index_test.go +++ b/pkg/querier/blocks_finder_bucket_index_test.go @@ -121,11 +121,10 @@ func TestBucketIndexBlocksFinder_GetBlocks(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() - blocks, deletionMarks, err := finder.GetBlocks(ctx, userID, testData.minT, testData.maxT) + blocks, deletionMarks, err := finder.GetBlocks(ctx, userID, testData.minT, testData.maxT, nil) require.NoError(t, err) require.ElementsMatch(t, testData.expectedBlocks, blocks) require.Equal(t, testData.expectedMarks, deletionMarks) @@ -162,10 +161,8 @@ func BenchmarkBucketIndexBlocksFinder_GetBlocks(b *testing.B) { require.NoError(b, bucketindex.WriteIndex(ctx, bkt, userID, nil, idx)) finder := prepareBucketIndexBlocksFinder(b, bkt) - b.ResetTimer() - - for n := 0; n < b.N; n++ { - blocks, marks, err := finder.GetBlocks(ctx, userID, 100, 200) + for b.Loop() { + blocks, marks, err := finder.GetBlocks(ctx, userID, 100, 200, nil) if err != nil || len(blocks) != 11 || len(marks) != 11 { b.Fail() } @@ -181,7 +178,7 @@ func TestBucketIndexBlocksFinder_GetBlocks_BucketIndexDoesNotExist(t *testing.T) bkt, _ := cortex_testutil.PrepareFilesystemBucket(t) finder := prepareBucketIndexBlocksFinder(t, bkt) - blocks, deletionMarks, err := finder.GetBlocks(ctx, userID, 10, 20) + blocks, deletionMarks, err := finder.GetBlocks(ctx, userID, 10, 20, nil) require.NoError(t, err) assert.Empty(t, blocks) assert.Empty(t, deletionMarks) @@ -199,7 +196,7 @@ func TestBucketIndexBlocksFinder_GetBlocks_BucketIndexIsCorrupted(t *testing.T) // Upload a corrupted bucket index. 
require.NoError(t, bkt.Upload(ctx, path.Join(userID, bucketindex.IndexCompressedFilename), strings.NewReader("invalid}!"))) - _, _, err := finder.GetBlocks(ctx, userID, 10, 20) + _, _, err := finder.GetBlocks(ctx, userID, 10, 20, nil) require.Equal(t, bucketindex.ErrIndexCorrupted, err) } @@ -219,7 +216,7 @@ func TestBucketIndexBlocksFinder_GetBlocks_BucketIndexIsTooOld(t *testing.T) { UpdatedAt: time.Now().Add(-2 * time.Hour).Unix(), })) - _, _, err := finder.GetBlocks(ctx, userID, 10, 20) + _, _, err := finder.GetBlocks(ctx, userID, 10, 20, nil) require.Equal(t, errBucketIndexTooOld, err) } @@ -270,10 +267,10 @@ func TestBucketIndexBlocksFinder_GetBlocks_BucketIndexIsTooOldWithCustomerKeyErr t.Run(name, func(t *testing.T) { bucketindex.WriteSyncStatus(ctx, bkt, userID, tc.ss, log.NewNopLogger()) finder := prepareBucketIndexBlocksFinder(t, bkt) - _, _, err := finder.GetBlocks(ctx, userID, 10, 20) + _, _, err := finder.GetBlocks(ctx, userID, 10, 20, nil) require.Equal(t, tc.err, err) // Doing 2 times to return from the cache - _, _, err = finder.GetBlocks(ctx, userID, 10, 20) + _, _, err = finder.GetBlocks(ctx, userID, 10, 20, nil) require.Equal(t, tc.err, err) }) } @@ -315,7 +312,7 @@ func TestBucketIndexBlocksFinder_GetBlocks_KeyPermissionDenied(t *testing.T) { finder := prepareBucketIndexBlocksFinder(t, bkt) - _, _, err := finder.GetBlocks(context.Background(), userID, 0, 100) + _, _, err := finder.GetBlocks(context.Background(), userID, 0, 100, nil) expected := validation.AccessDeniedError("error") require.IsType(t, expected, err) } diff --git a/pkg/querier/blocks_finder_bucket_scan.go b/pkg/querier/blocks_finder_bucket_scan.go index 949ab5f635..aef1543cc9 100644 --- a/pkg/querier/blocks_finder_bucket_scan.go +++ b/pkg/querier/blocks_finder_bucket_scan.go @@ -2,6 +2,7 @@ package querier import ( "context" + "maps" "path" "path/filepath" "sort" @@ -15,6 +16,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/prometheus/model/labels" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/block" @@ -111,7 +113,7 @@ func NewBucketScanBlocksFinder(cfg BucketScanBlocksFinderConfig, usersScanner us // GetBlocks returns known blocks for userID containing samples within the range minT // and maxT (milliseconds, both included). Returned blocks are sorted by MaxTime descending. -func (d *BucketScanBlocksFinder) GetBlocks(_ context.Context, userID string, minT, maxT int64) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) { +func (d *BucketScanBlocksFinder) GetBlocks(_ context.Context, userID string, minT, maxT int64, _ []*labels.Matcher) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) { // We need to ensure the initial full bucket scan succeeded. if d.State() != services.Running { return nil, nil, errBucketScanBlocksFinderNotRunning @@ -256,17 +258,11 @@ pushJobsLoop: } else { // If an error occurred, we prefer to partially update the metas map instead of // not updating it at all. At least we'll update blocks for the successful tenants. 
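The partial-update loops below collapse to `maps.Copy` (Go 1.21+), which inserts every key/value pair of the source map into the destination, overwriting keys that already exist; this is exactly the hand-written loop it replaces. For example:

package main

import (
	"fmt"
	"maps"
)

func main() {
	dst := map[string]int{"user-1": 1, "user-2": 2}
	src := map[string]int{"user-2": 20, "user-3": 30}
	maps.Copy(dst, src)
	fmt.Println(dst) // map[user-1:1 user-2:20 user-3:30]
}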
- for userID, metas := range resMetas { - d.userMetas[userID] = metas - } + maps.Copy(d.userMetas, resMetas) - for userID, metas := range resMetasLookup { - d.userMetasLookup[userID] = metas - } + maps.Copy(d.userMetasLookup, resMetasLookup) - for userID, deletionMarks := range resDeletionMarks { - d.userDeletionMarks[userID] = deletionMarks - } + maps.Copy(d.userDeletionMarks, resDeletionMarks) } d.userMx.Unlock() diff --git a/pkg/querier/blocks_finder_bucket_scan_test.go b/pkg/querier/blocks_finder_bucket_scan_test.go index 8393e4b12c..9313afffdf 100644 --- a/pkg/querier/blocks_finder_bucket_scan_test.go +++ b/pkg/querier/blocks_finder_bucket_scan_test.go @@ -39,7 +39,7 @@ func TestBucketScanBlocksFinder_InitialScan(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(ctx, s)) - blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 30) + blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 30, nil) require.NoError(t, err) require.Equal(t, 2, len(blocks)) assert.Equal(t, user1Block2.ULID, blocks[0].ID) @@ -48,7 +48,7 @@ func TestBucketScanBlocksFinder_InitialScan(t *testing.T) { assert.WithinDuration(t, time.Now(), blocks[1].GetUploadedAt(), 5*time.Second) assert.Empty(t, deletionMarks) - blocks, deletionMarks, err = s.GetBlocks(ctx, "user-2", 0, 30) + blocks, deletionMarks, err = s.GetBlocks(ctx, "user-2", 0, 30, nil) require.NoError(t, err) require.Equal(t, 1, len(blocks)) assert.Equal(t, user2Block1.ULID, blocks[0].ID) @@ -110,7 +110,7 @@ func TestBucketScanBlocksFinder_InitialScanFailure(t *testing.T) { require.NoError(t, s.StartAsync(ctx)) require.Error(t, s.AwaitRunning(ctx)) - blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 30) + blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 30, nil) assert.Equal(t, errBucketScanBlocksFinderNotRunning, err) assert.Nil(t, blocks) assert.Nil(t, deletionMarks) @@ -233,7 +233,7 @@ func TestBucketScanBlocksFinder_PeriodicScanFindsNewUser(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(ctx, s)) - blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 30) + blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 30, nil) require.NoError(t, err) require.Equal(t, 0, len(blocks)) assert.Empty(t, deletionMarks) @@ -245,7 +245,7 @@ func TestBucketScanBlocksFinder_PeriodicScanFindsNewUser(t *testing.T) { // Trigger a periodic sync require.NoError(t, s.scan(ctx)) - blocks, deletionMarks, err = s.GetBlocks(ctx, "user-1", 0, 30) + blocks, deletionMarks, err = s.GetBlocks(ctx, "user-1", 0, 30, nil) require.NoError(t, err) require.Equal(t, 2, len(blocks)) assert.Equal(t, block2.ULID, blocks[0].ID) @@ -266,7 +266,7 @@ func TestBucketScanBlocksFinder_PeriodicScanFindsNewBlock(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(ctx, s)) - blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 30) + blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 30, nil) require.NoError(t, err) require.Equal(t, 1, len(blocks)) assert.Equal(t, block1.ULID, blocks[0].ID) @@ -278,7 +278,7 @@ func TestBucketScanBlocksFinder_PeriodicScanFindsNewBlock(t *testing.T) { // Trigger a periodic sync require.NoError(t, s.scan(ctx)) - blocks, deletionMarks, err = s.GetBlocks(ctx, "user-1", 0, 30) + blocks, deletionMarks, err = s.GetBlocks(ctx, "user-1", 0, 30, nil) require.NoError(t, err) require.Equal(t, 2, len(blocks)) assert.Equal(t, block2.ULID, blocks[0].ID) @@ -298,7 +298,7 @@ func TestBucketScanBlocksFinder_PeriodicScanFindsBlockMarkedForDeletion(t *testi require.NoError(t, 
services.StartAndAwaitRunning(ctx, s)) - blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 30) + blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 30, nil) require.NoError(t, err) require.Equal(t, 2, len(blocks)) assert.Equal(t, block2.ULID, blocks[0].ID) @@ -310,7 +310,7 @@ func TestBucketScanBlocksFinder_PeriodicScanFindsBlockMarkedForDeletion(t *testi // Trigger a periodic sync require.NoError(t, s.scan(ctx)) - blocks, deletionMarks, err = s.GetBlocks(ctx, "user-1", 0, 30) + blocks, deletionMarks, err = s.GetBlocks(ctx, "user-1", 0, 30, nil) require.NoError(t, err) require.Equal(t, 2, len(blocks)) assert.Equal(t, block2.ULID, blocks[0].ID) @@ -330,7 +330,7 @@ func TestBucketScanBlocksFinder_PeriodicScanFindsDeletedBlock(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(ctx, s)) - blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 30) + blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 30, nil) require.NoError(t, err) require.Equal(t, 2, len(blocks)) assert.Equal(t, block2.ULID, blocks[0].ID) @@ -342,7 +342,7 @@ func TestBucketScanBlocksFinder_PeriodicScanFindsDeletedBlock(t *testing.T) { // Trigger a periodic sync require.NoError(t, s.scan(ctx)) - blocks, deletionMarks, err = s.GetBlocks(ctx, "user-1", 0, 30) + blocks, deletionMarks, err = s.GetBlocks(ctx, "user-1", 0, 30, nil) require.NoError(t, err) require.Equal(t, 1, len(blocks)) assert.Equal(t, block2.ULID, blocks[0].ID) @@ -359,7 +359,7 @@ func TestBucketScanBlocksFinder_PeriodicScanFindsDeletedUser(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(ctx, s)) - blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 30) + blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 30, nil) require.NoError(t, err) require.Equal(t, 2, len(blocks)) assert.Equal(t, block2.ULID, blocks[0].ID) @@ -371,7 +371,7 @@ func TestBucketScanBlocksFinder_PeriodicScanFindsDeletedUser(t *testing.T) { // Trigger a periodic sync require.NoError(t, s.scan(ctx)) - blocks, deletionMarks, err = s.GetBlocks(ctx, "user-1", 0, 30) + blocks, deletionMarks, err = s.GetBlocks(ctx, "user-1", 0, 30, nil) require.NoError(t, err) require.Equal(t, 0, len(blocks)) assert.Empty(t, deletionMarks) @@ -387,7 +387,7 @@ func TestBucketScanBlocksFinder_PeriodicScanFindsUserWhichWasPreviouslyDeleted(t require.NoError(t, services.StartAndAwaitRunning(ctx, s)) - blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 40) + blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 40, nil) require.NoError(t, err) require.Equal(t, 2, len(blocks)) assert.Equal(t, block2.ULID, blocks[0].ID) @@ -399,7 +399,7 @@ func TestBucketScanBlocksFinder_PeriodicScanFindsUserWhichWasPreviouslyDeleted(t // Trigger a periodic sync require.NoError(t, s.scan(ctx)) - blocks, deletionMarks, err = s.GetBlocks(ctx, "user-1", 0, 40) + blocks, deletionMarks, err = s.GetBlocks(ctx, "user-1", 0, 40, nil) require.NoError(t, err) require.Equal(t, 0, len(blocks)) assert.Empty(t, deletionMarks) @@ -409,7 +409,7 @@ func TestBucketScanBlocksFinder_PeriodicScanFindsUserWhichWasPreviouslyDeleted(t // Trigger a periodic sync require.NoError(t, s.scan(ctx)) - blocks, deletionMarks, err = s.GetBlocks(ctx, "user-1", 0, 40) + blocks, deletionMarks, err = s.GetBlocks(ctx, "user-1", 0, 40, nil) require.NoError(t, err) require.Equal(t, 1, len(blocks)) assert.Equal(t, block3.ULID, blocks[0].ID) @@ -502,11 +502,10 @@ func TestBucketScanBlocksFinder_GetBlocks(t *testing.T) { } for testName, testData := range tests { - testData := testData 
t.Run(testName, func(t *testing.T) { t.Parallel() - metas, deletionMarks, err := s.GetBlocks(ctx, "user-1", testData.minT, testData.maxT) + metas, deletionMarks, err := s.GetBlocks(ctx, "user-1", testData.minT, testData.maxT, nil) require.NoError(t, err) require.Equal(t, len(testData.expectedMetas), len(metas)) require.Equal(t, testData.expectedMarks, deletionMarks) diff --git a/pkg/querier/blocks_store_balanced_set.go b/pkg/querier/blocks_store_balanced_set.go index b69f9cf439..3967df03e4 100644 --- a/pkg/querier/blocks_store_balanced_set.go +++ b/pkg/querier/blocks_store_balanced_set.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "math/rand" + "slices" "strings" "time" @@ -16,7 +17,6 @@ import ( "github.com/thanos-io/thanos/pkg/extprom" "github.com/cortexproject/cortex/pkg/ring/client" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -94,7 +94,7 @@ func (s *blocksStoreBalancedSet) GetClientsFor(_ string, blockIDs []ulid.ULID, e func getFirstNonExcludedAddr(addresses, exclude []string) string { for _, addr := range addresses { - if !util.StringsContain(exclude, addr) { + if !slices.Contains(exclude, addr) { return addr } } diff --git a/pkg/querier/blocks_store_balanced_set_test.go b/pkg/querier/blocks_store_balanced_set_test.go index 6de7a105fd..0af9719eb3 100644 --- a/pkg/querier/blocks_store_balanced_set_test.go +++ b/pkg/querier/blocks_store_balanced_set_test.go @@ -33,7 +33,7 @@ func TestBlocksStoreBalancedSet_GetClientsFor(t *testing.T) { // of returned clients (we expect an even distribution). clientsCount := map[string]int{} - for i := 0; i < numGets; i++ { + for range numGets { clients, err := s.GetClientsFor("", []ulid.ULID{block1}, map[ulid.ULID][]string{}, nil) require.NoError(t, err) require.Len(t, clients, 1) @@ -131,7 +131,6 @@ func TestBlocksStoreBalancedSet_GetClientsFor_Exclude(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index 98b58a8336..8c7ae1f41f 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -86,7 +86,7 @@ type BlocksFinder interface { // GetBlocks returns known blocks for userID containing samples within the range minT // and maxT (milliseconds, both included). Returned blocks are sorted by MaxTime descending. 
- GetBlocks(ctx context.Context, userID string, minT, maxT int64) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) + GetBlocks(ctx context.Context, userID string, minT, maxT int64, matchers []*labels.Matcher) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) } // BlocksStoreClient is the interface that should be implemented by any client used @@ -373,7 +373,7 @@ func (q *blocksStoreQuerier) LabelNames(ctx context.Context, hints *storage.Labe return queriedBlocks, nil, retryableError } - if err := q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, userID, queryFunc); err != nil { + if err := q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, matchers, userID, queryFunc); err != nil { return nil, nil, err } @@ -416,7 +416,7 @@ func (q *blocksStoreQuerier) LabelValues(ctx context.Context, name string, hints return queriedBlocks, nil, retryableError } - if err := q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, userID, queryFunc); err != nil { + if err := q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, matchers, userID, queryFunc); err != nil { return nil, nil, err } @@ -472,7 +472,7 @@ func (q *blocksStoreQuerier) selectSorted(ctx context.Context, sp *storage.Selec return queriedBlocks, nil, retryableError } - if err := q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, userID, queryFunc); err != nil { + if err := q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, matchers, userID, queryFunc); err != nil { return storage.ErrSeriesSet(err) } @@ -485,8 +485,8 @@ func (q *blocksStoreQuerier) selectSorted(ctx context.Context, sp *storage.Selec resWarnings) } -func (q *blocksStoreQuerier) queryWithConsistencyCheck(ctx context.Context, logger log.Logger, minT, maxT int64, userID string, - queryFunc func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error, error)) error { +func (q *blocksStoreQuerier) queryWithConsistencyCheck(ctx context.Context, logger log.Logger, minT, maxT int64, matchers []*labels.Matcher, + userID string, queryFunc func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error, error)) error { // If queryStoreAfter is enabled, we do manipulate the query maxt to query samples up until // now - queryStoreAfter, because the most recent time range is covered by ingesters. This // optimization is particularly important for the blocks storage because can be used to skip @@ -508,7 +508,7 @@ func (q *blocksStoreQuerier) queryWithConsistencyCheck(ctx context.Context, logg } // Find the list of blocks we need to query given the time range. - knownBlocks, knownDeletionMarks, err := q.finder.GetBlocks(ctx, userID, minT, maxT) + knownBlocks, knownDeletionMarks, err := q.finder.GetBlocks(ctx, userID, minT, maxT, matchers) // if blocks were already discovered, we should use then if b, ok := ExtractBlocksFromContext(ctx); ok { @@ -637,8 +637,6 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( // Concurrently fetch series from all clients. for c, blockIDs := range clients { // Change variables scope since it will be used in a goroutine. - c := c - blockIDs := blockIDs g.Go(func() error { // See: https://github.com/prometheus/prometheus/pull/8050 @@ -860,8 +858,6 @@ func (q *blocksStoreQuerier) fetchLabelNamesFromStore( // Concurrently fetch series from all clients. for c, blockIDs := range clients { // Change variables scope since it will be used in a goroutine. 
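
The deletions that follow drop the c := c / blockIDs := blockIDs rebinding referred to by the comment above. Since Go 1.22, for-loop variables are scoped per iteration, so a goroutine launched in the loop body captures that iteration's values without a manual copy. A minimal sketch of the same fan-out shape:

package main

import (
    "context"
    "fmt"

    "golang.org/x/sync/errgroup"
)

func main() {
    clients := map[string][]int{"store-1": {1, 2}, "store-2": {3}}
    g, ctx := errgroup.WithContext(context.Background())
    for c, blockIDs := range clients {
        // No `c := c` / `blockIDs := blockIDs` rebinding needed on Go 1.22+:
        // loop variables are per-iteration, so each goroutine captures its
        // own copy rather than the shared final value.
        g.Go(func() error {
            if err := ctx.Err(); err != nil {
                return err
            }
            fmt.Println(c, blockIDs)
            return nil
        })
    }
    if err := g.Wait(); err != nil {
        fmt.Println("error:", err)
    }
}
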
- c := c - blockIDs := blockIDs g.Go(func() error { req, err := createLabelNamesRequest(minT, maxT, limit, blockIDs, matchers) @@ -967,8 +963,6 @@ func (q *blocksStoreQuerier) fetchLabelValuesFromStore( // Concurrently fetch series from all clients. for c, blockIDs := range clients { // Change variables scope since it will be used in a goroutine. - c := c - blockIDs := blockIDs g.Go(func() error { req, err := createLabelValuesRequest(minT, maxT, limit, name, blockIDs, matchers...) @@ -1204,17 +1198,11 @@ func countSamplesAndChunks(series ...*storepb.Series) (samplesCount, chunksCount // only retry connection issues func isRetryableError(err error) bool { - // retry upon resource exhaustion error from resource monitor - var resourceExhaustedErr *limiter.ResourceLimitReachedError - if errors.As(err, &resourceExhaustedErr) { - return true - } - switch status.Code(err) { case codes.Unavailable: return true case codes.ResourceExhausted: - return errors.Is(err, storegateway.ErrTooManyInflightRequests) + return errors.Is(err, storegateway.ErrTooManyInflightRequests) || errors.Is(err, limiter.ErrResourceLimitReached) // Client side connection closing, this error happens during store gateway deployment. // https://github.com/grpc/grpc-go/blob/03172006f5d168fc646d87928d85cb9c4a480291/clientconn.go#L67 case codes.Canceled: diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index da0a5df267..6db0ab6a12 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "net/http" "sort" "strings" "testing" @@ -31,6 +32,7 @@ import ( "github.com/thanos-io/thanos/pkg/store/hintspb" "github.com/thanos-io/thanos/pkg/store/labelpb" "github.com/thanos-io/thanos/pkg/store/storepb" + "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/user" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -89,7 +91,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { tests := map[string]struct { finderResult bucketindex.Blocks finderErr error - storeSetResponses []interface{} + storeSetResponses []any limits BlocksStoreLimits queryLimiter *limiter.QueryLimiter seriesLimit int @@ -114,7 +116,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ errors.New("no client found"), }, limits: &blocksStoreLimitsMock{}, @@ -126,10 +128,10 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block1, block2), }}: {block1, block2}, }, @@ -151,11 +153,11 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: 
[]*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT, testHistogram1), @@ -183,11 +185,11 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), @@ -215,11 +217,11 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 3, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), []cortexpb.Sample{{Value: 3, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1, block2), }}: {block1, block2}, }, @@ -246,11 +248,11 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel, series1Label}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT, testHistogram1), @@ -258,7 +260,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }, nil, ), mockSeriesResponse( - labels.Labels{metricNameLabel, series2Label}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT, testHistogram3), @@ -290,11 +292,11 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel, series1Label}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), @@ -302,7 +304,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }, ), mockSeriesResponse( - 
labels.Labels{metricNameLabel, series2Label}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram3), @@ -334,14 +336,14 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block2), }}: {block2}, }, @@ -363,11 +365,11 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT, testHistogram1), @@ -377,7 +379,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }}: {block1}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT+1, testHistogram2), @@ -404,11 +406,11 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), @@ -418,7 +420,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }}: {block1}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT+1, testFloatHistogram2), @@ -445,14 +447,14 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - 
mockSeriesResponse(labels.Labels{metricNameLabel}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block1), }}: {block1}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block2), }}: {block2}, }, @@ -474,11 +476,11 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT+1, testHistogram2), @@ -488,7 +490,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }}: {block1}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT, testHistogram1), @@ -516,11 +518,11 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT+1, testFloatHistogram2), @@ -530,7 +532,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }}: {block1}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), @@ -558,19 +560,19 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, 
series2Label.Name, series2Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block2), }}: {block2}, &storeGatewayClientMock{remoteAddr: "3.3.3.3", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 3, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 3, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block3), }}: {block3}, }, @@ -628,19 +630,19 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block2), }}: {block2}, &storeGatewayClientMock{remoteAddr: "3.3.3.3", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 3, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 3, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block3), }}: {block3}, }, @@ -662,18 +664,18 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", 
mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel, series1Label}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT+1, testHistogram2), }, nil, ), mockSeriesResponse( - labels.Labels{metricNameLabel, series2Label}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT, testHistogram1), @@ -683,7 +685,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }}: {block1}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel, series1Label}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT, testHistogram1), @@ -694,7 +696,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }}: {block2}, &storeGatewayClientMock{remoteAddr: "3.3.3.3", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel, series2Label}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT, testHistogram1), @@ -728,18 +730,18 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel, series1Label}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT+1, testFloatHistogram2), }, ), mockSeriesResponse( - labels.Labels{metricNameLabel, series2Label}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), @@ -749,7 +751,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }}: {block1}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel, series1Label}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), @@ -760,7 +762,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }}: {block2}, &storeGatewayClientMock{remoteAddr: "3.3.3.3", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel, series2Label}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), @@ -794,11 +796,11 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ // First attempt returns a client whose response does not include all expected 
blocks. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block1), }}: {block1}, }, @@ -816,15 +818,15 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block3}, &bucketindex.Block{ID: block4}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ // First attempt returns a client whose response does not include all expected blocks. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block1), }}: {block1}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block2), }}: {block2}, }, @@ -842,29 +844,29 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block3}, &bucketindex.Block{ID: block4}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ // First attempt returns a client whose response does not include all expected blocks. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1, block3}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block2), }}: {block2, block4}, }, // Second attempt returns 1 missing block. 
map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "3.3.3.3", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block3), }}: {block3, block4}, }, // Third attempt returns the last missing block. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "4.4.4.4", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 3, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), []cortexpb.Sample{{Value: 3, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block4), }}: {block4}, }, @@ -921,10 +923,10 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block1, block2), }}: {block1, block2}, }, @@ -946,10 +948,10 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block1, block2), }}: {block1, block2}, }, @@ -963,10 +965,10 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, []cortexpb.Histogram{ + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT, testHistogram1), cortexpb.HistogramToHistogramProto(minT+1, testHistogram2), }, nil), @@ -983,10 +985,10 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ 
map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, nil, []cortexpb.Histogram{ + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), cortexpb.FloatHistogramToHistogramProto(minT+1, testFloatHistogram2), }), @@ -1003,10 +1005,10 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block1, block2), }}: {block1, block2}, }, @@ -1020,10 +1022,10 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, []cortexpb.Histogram{ + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT, testHistogram1), cortexpb.HistogramToHistogramProto(minT+1, testHistogram2), }, nil), @@ -1040,10 +1042,10 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, nil, []cortexpb.Histogram{ + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), cortexpb.FloatHistogramToHistogramProto(minT+1, testFloatHistogram2), }), @@ -1062,29 +1064,29 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block3}, &bucketindex.Block{ID: block4}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ // First attempt returns a client whose response does not include all expected blocks. 
map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "3.3.3.3", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block3), }}: {block3, block4}, }, // Third attempt returns the last missing block. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "4.4.4.4", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 3, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), []cortexpb.Sample{{Value: 3, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block4), }}: {block4}, }, @@ -1100,29 +1102,29 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block3}, &bucketindex.Block{ID: block4}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ // First attempt returns a client whose response does not include all expected blocks. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1, block3}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block2), }}: {block2, block4}, }, // Second attempt returns 1 missing block.
map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "3.3.3.3", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block3), }}: {block3, block4}, }, // Third attempt returns the last missing block. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "4.4.4.4", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 3, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), []cortexpb.Sample{{Value: 3, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block4), }}: {block4}, }, @@ -1136,11 +1138,11 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block1, block2), }}: {block1, block2}, }, @@ -1154,15 +1156,15 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT, testHistogram1), }, nil, ), - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, nil, + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT+1, testHistogram2), }, nil, @@ -1180,15 +1182,15 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, nil, + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, 
nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), }, ), - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, nil, nil, + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT+1, testFloatHistogram2), }, @@ -1206,10 +1208,10 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block1, block2), }}: {block1, block2}, }, @@ -1223,10 +1225,10 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, []cortexpb.Histogram{ + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT, testHistogram1), }, nil), mockHintsResponse(block1, block2), @@ -1242,10 +1244,10 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, nil, []cortexpb.Histogram{ + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), }), mockHintsResponse(block1, block2), @@ -1261,10 +1263,10 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block1, block2), }}: {block1, block2}, }, @@ -1278,10 +1280,10 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + 
storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, []cortexpb.Histogram{ + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT, testHistogram1), }, nil), mockHintsResponse(block1, block2), @@ -1297,10 +1299,10 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, nil, []cortexpb.Histogram{ + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), }), mockHintsResponse(block1, block2), @@ -1315,7 +1317,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { finderResult: bucketindex.Blocks{ &bucketindex.Block{ID: block1}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ remoteAddr: "1.1.1.1", @@ -1324,7 +1326,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }, map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, }, @@ -1344,7 +1346,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { finderResult: bucketindex.Blocks{ &bucketindex.Block{ID: block1}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ remoteAddr: "1.1.1.1", @@ -1353,7 +1355,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }, map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, }, @@ -1373,7 +1375,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { finderResult: bucketindex.Blocks{ &bucketindex.Block{ID: block1}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ remoteAddr: "1.1.1.1", @@ -1382,7 +1384,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }, map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), + 
mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, }, @@ -1403,12 +1405,12 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &bucketindex.Block{ID: block1}, }, expectedErr: validation.AccessDeniedError("PermissionDenied"), - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }, mockedSeriesStreamErr: status.Error(codes.PermissionDenied, "PermissionDenied"), @@ -1418,7 +1420,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &storeGatewayClientMock{ remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }, mockedSeriesStreamErr: status.Error(codes.PermissionDenied, "PermissionDenied"), @@ -1440,19 +1442,19 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { finderResult: bucketindex.Blocks{ &bucketindex.Block{ID: block1}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ remoteAddr: "1.1.1.1", mockedSeriesStreamErr: status.Error(codes.Unavailable, "unavailable"), mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, }, map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, }, @@ -1472,7 +1474,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { finderResult: bucketindex.Blocks{ &bucketindex.Block{ID: block1}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ remoteAddr: "1.1.1.1", @@ -1481,7 +1483,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }, map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), + 
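Swapping the constructed &limiter.ResourceLimitReachedError{} for the package-level sentinel limiter.ErrResourceLimitReached lets callers match the error with errors.Is even through wrapping, presumably how the retry path classifies it without type assertions. The sketch below illustrates the idea with a stand-in sentinel (errLimitReached is illustrative, not the Cortex symbol):

package main

import (
	"errors"
	"fmt"
)

// errLimitReached stands in for a package-level sentinel such as
// limiter.ErrResourceLimitReached in the hunk above.
var errLimitReached = errors.New("resource limit reached")

func fetchChunks() error {
	// Wrap with %w so the sentinel stays matchable up the call stack.
	return fmt.Errorf("fetching chunks: %w", errLimitReached)
}

func main() {
	err := fetchChunks()
	fmt.Println(errors.Is(err, errLimitReached)) // true
}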
mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, }, @@ -1501,7 +1503,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { finderResult: bucketindex.Blocks{ &bucketindex.Block{ID: block1}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ remoteAddr: "1.1.1.1", @@ -1510,7 +1512,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }, map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, }, @@ -1522,16 +1524,16 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { finderResult: bucketindex.Blocks{ &bucketindex.Block{ID: block1}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ remoteAddr: "1.1.1.1", - mockedSeriesErr: &limiter.ResourceLimitReachedError{}, + mockedSeriesErr: limiter.ErrResourceLimitReached, }: {block1}, }, map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, }, @@ -1550,7 +1552,6 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() @@ -1568,7 +1569,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { reg := prometheus.NewPedanticRegistry() stores := &blocksStoreSetMock{mockedResponses: testData.storeSetResponses} finder := &blocksFinderMock{} - finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT).Return(testData.finderResult, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), testData.finderErr) + finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT, mock.Anything).Return(testData.finderResult, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), testData.finderErr) q := &blocksStoreQuerier{ minT: minT, @@ -1654,7 +1655,7 @@ func TestOverrideBlockDiscovery(t *testing.T) { minT := int64(10) maxT := int64(20) - stores := &blocksStoreSetMock{mockedResponses: []interface{}{ + stores := &blocksStoreSetMock{mockedResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockHintsResponse(block1), @@ -1664,7 +1665,7 @@ func TestOverrideBlockDiscovery(t *testing.T) { } finder := &blocksFinderMock{} // return block 1 and 2 on finder but only query block 1 - finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT).Return(bucketindex.Blocks{ + finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT, mock.Anything).Return(bucketindex.Blocks{ &bucketindex.Block{ID: block1}, 
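The deleted testData := testData lines (one appears a few hunks up as well) are the pre-Go-1.22 loop-variable capture workaround. Since Go 1.22, each iteration of a for range loop binds a fresh variable, so the shadowing copy is dead code once go.mod targets 1.22 or later. For example:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for _, v := range []int{1, 2, 3} {
		wg.Add(1)
		// Pre-1.22 this needed `v := v` here, or every goroutine could
		// observe the final value of v. With go >= 1.22 in go.mod, v is
		// already a fresh variable on each iteration.
		go func() {
			defer wg.Done()
			fmt.Println(v) // prints 1, 2 and 3, in some order
		}()
	}
	wg.Wait()
}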
&bucketindex.Block{ID: block2}, }, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), nil) @@ -1719,7 +1720,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { finderResult bucketindex.Blocks finderErr error limit int - storeSetResponses []interface{} + storeSetResponses []any expectedLabelNames []string expectedLabelValues []string // For __name__ expectedErr string @@ -1738,7 +1739,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ errors.New("no client found"), }, expectedErr: "no client found", @@ -1748,7 +1749,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ remoteAddr: "1.1.1.1", @@ -1773,7 +1774,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ remoteAddr: "1.1.1.1", @@ -1811,7 +1812,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ remoteAddr: "1.1.1.1", @@ -1852,7 +1853,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { // Block1 has series1 and series2 // Block2 has only series1 // Block3 has only series2 - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ remoteAddr: "1.1.1.1", @@ -1936,7 +1937,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { // Block1 has series1 and series2 // Block2 has only series1 // Block3 has only series2 - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ remoteAddr: "1.1.1.1", @@ -1987,7 +1988,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ // First attempt returns a client whose response does not include all expected blocks. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ @@ -2016,7 +2017,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { &bucketindex.Block{ID: block3}, &bucketindex.Block{ID: block4}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ // First attempt returns a client whose response does not include all expected blocks. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ @@ -2062,7 +2063,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { &bucketindex.Block{ID: block3}, &bucketindex.Block{ID: block4}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ // First attempt returns a client whose response does not include all expected blocks. 
map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ @@ -2161,7 +2162,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { finderResult: bucketindex.Blocks{ &bucketindex.Block{ID: block1}, }, - storeSetResponses: []interface{}{ + storeSetResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{ remoteAddr: "1.1.1.1", @@ -2195,7 +2196,6 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { } for testName, testData := range tests { - testData := testData var hints *storage.LabelHints if testData.limit > 0 { hints = &storage.LabelHints{ @@ -2213,7 +2213,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { reg := prometheus.NewPedanticRegistry() stores := &blocksStoreSetMock{mockedResponses: testData.storeSetResponses} finder := &blocksFinderMock{} - finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT).Return(testData.finderResult, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), testData.finderErr) + finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT, mock.Anything).Return(testData.finderResult, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), testData.finderErr) q := &blocksStoreQuerier{ minT: minT, @@ -2315,13 +2315,12 @@ func TestBlocksStoreQuerier_SelectSortedShouldHonorQueryStoreAfter(t *testing.T) } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() ctx := user.InjectOrgID(context.Background(), "user-1") finder := &blocksFinderMock{} - finder.On("GetBlocks", mock.Anything, "user-1", mock.Anything, mock.Anything).Return(bucketindex.Blocks(nil), map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), error(nil)) + finder.On("GetBlocks", mock.Anything, "user-1", mock.Anything, mock.Anything, mock.Anything).Return(bucketindex.Blocks(nil), map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), error(nil)) q := &blocksStoreQuerier{ minT: testData.queryMinT, @@ -2385,7 +2384,7 @@ func TestBlocksStoreQuerier_PromQLExecution(t *testing.T) { finder := &blocksFinderMock{ Service: services.NewIdleService(nil, nil), } - finder.On("GetBlocks", mock.Anything, "user-1", mock.Anything, mock.Anything).Return(bucketindex.Blocks{ + finder.On("GetBlocks", mock.Anything, "user-1", mock.Anything, mock.Anything, mock.Anything).Return(bucketindex.Blocks{ &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), error(nil)) @@ -2440,7 +2439,7 @@ func TestBlocksStoreQuerier_PromQLExecution(t *testing.T) { stores := &blocksStoreSetMock{ Service: services.NewIdleService(nil, nil), - mockedResponses: []interface{}{ + mockedResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ gateway1: {block1}, gateway2: {block2}, @@ -2481,7 +2480,6 @@ func TestBlocksStoreQuerier_PromQLExecution(t *testing.T) { require.Equal(t, f.T, int64(f.F)) } for i, h := range m.Histograms { - h := h // Check sample timestamp is expected. 
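Because GetBlocks gained a matchers argument, every testify expectation needs a fifth matcher, hence the added mock.Anything in the finder.On(...) calls: testify matches calls by method name and exact argument list, so a four-argument expectation no longer covers the five-argument call. A self-contained sketch of the pattern, where finderMock is a simplified stand-in for blocksFinderMock:

package main

import (
	"context"
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/stretchr/testify/mock"
)

type finderMock struct{ mock.Mock }

func (m *finderMock) GetBlocks(ctx context.Context, userID string, minT, maxT int64, matchers []*labels.Matcher) error {
	// The recorded call must include all five arguments so expectations
	// registered with five matchers line up with it.
	args := m.Called(ctx, userID, minT, maxT, matchers)
	return args.Error(0)
}

func main() {
	f := &finderMock{}
	f.On("GetBlocks", mock.Anything, "user-1", int64(0), int64(10), mock.Anything).Return(nil)
	fmt.Println(f.GetBlocks(context.Background(), "user-1", 0, 10, nil)) // <nil>
}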
require.Equal(t, h.T, int64(from)+int64(i)*15000) expectedH := tsdbutil.GenerateTestGaugeFloatHistogram(h.T) @@ -2498,10 +2496,24 @@ func TestBlocksStoreQuerier_PromQLExecution(t *testing.T) { } } +func TestBlocksStoreQuerier_isRetryableError(t *testing.T) { + require.True(t, isRetryableError(status.Error(codes.Unavailable, ""))) + require.True(t, isRetryableError(storegateway.ErrTooManyInflightRequests)) + require.True(t, isRetryableError(limiter.ErrResourceLimitReached)) + require.True(t, isRetryableError(status.Error(codes.Canceled, "grpc: the client connection is closing"))) + require.True(t, isRetryableError(errors.New("pool exhausted"))) + + require.False(t, isRetryableError(status.Error(codes.ResourceExhausted, "some other error"))) + require.False(t, isRetryableError(status.Error(codes.Canceled, "some other error"))) + require.False(t, isRetryableError(errors.New("some other error"))) + require.False(t, isRetryableError(fmt.Errorf("some other error"))) + require.False(t, isRetryableError(httpgrpc.Errorf(http.StatusServiceUnavailable, "some other error"))) +} + type blocksStoreSetMock struct { services.Service - mockedResponses []interface{} + mockedResponses []any nextResult int queriedBlocks []ulid.ULID } @@ -2535,8 +2547,8 @@ type blocksFinderMock struct { mock.Mock } -func (m *blocksFinderMock) GetBlocks(ctx context.Context, userID string, minT, maxT int64) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) { - args := m.Called(ctx, userID, minT, maxT) +func (m *blocksFinderMock) GetBlocks(ctx context.Context, userID string, minT, maxT int64, matchers []*labels.Matcher) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) { + args := m.Called(ctx, userID, minT, maxT, matchers) return args.Get(0).(bucketindex.Blocks), args.Get(1).(map[ulid.ULID]*bucketindex.BlockDeletionMark), args.Error(2) } @@ -2736,9 +2748,9 @@ func mockValuesHints(ids ...ulid.ULID) *types.Any { func namesFromSeries(series ...labels.Labels) []string { namesMap := map[string]struct{}{} for _, s := range series { - for _, l := range s { + s.Range(func(l labels.Label) { namesMap[l.Name] = struct{}{} - } + }) } names := []string{} @@ -2753,11 +2765,11 @@ func namesFromSeries(series ...labels.Labels) []string { func valuesFromSeries(name string, series ...labels.Labels) []string { valuesMap := map[string]struct{}{} for _, s := range series { - for _, l := range s { + s.Range(func(l labels.Label) { if l.Name == name { valuesMap[l.Value] = struct{}{} } - } + }) } values := []string{} diff --git a/pkg/querier/blocks_store_replicated_set.go b/pkg/querier/blocks_store_replicated_set.go index 3305db3b47..d102a522e2 100644 --- a/pkg/querier/blocks_store_replicated_set.go +++ b/pkg/querier/blocks_store_replicated_set.go @@ -5,6 +5,7 @@ import ( "fmt" "math" "math/rand" + "slices" "github.com/go-kit/log" "github.com/oklog/ulid/v2" @@ -179,7 +180,7 @@ func getNonExcludedInstance(set ring.ReplicationSet, exclude []string, balancing } } for _, instance := range set.Instances { - if util.StringsContain(exclude, instance.Addr) { + if slices.Contains(exclude, instance.Addr) { continue } // If zone awareness is not enabled, pick first non-excluded instance. 
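The last hunk above retires the util.StringsContain helper in favor of the standard library's slices.Contains, which has been generic over any comparable element type since Go 1.21:

package main

import (
	"fmt"
	"slices"
)

func main() {
	exclude := []string{"1.1.1.1", "2.2.2.2"}
	// slices.Contains replaces hand-rolled per-type helpers.
	fmt.Println(slices.Contains(exclude, "2.2.2.2")) // true
	fmt.Println(slices.Contains(exclude, "3.3.3.3")) // false
}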
diff --git a/pkg/querier/blocks_store_replicated_set_test.go b/pkg/querier/blocks_store_replicated_set_test.go index 62bf96270c..6ccc65d4a8 100644 --- a/pkg/querier/blocks_store_replicated_set_test.go +++ b/pkg/querier/blocks_store_replicated_set_test.go @@ -555,7 +555,6 @@ func TestBlocksStoreReplicationSet_GetClientsFor(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() @@ -566,7 +565,7 @@ func TestBlocksStoreReplicationSet_GetClientsFor(t *testing.T) { ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil) t.Cleanup(func() { assert.NoError(t, closer.Close()) }) - require.NoError(t, ringStore.CAS(ctx, "test", func(in interface{}) (interface{}, bool, error) { + require.NoError(t, ringStore.CAS(ctx, "test", func(in any) (any, bool, error) { d := ring.NewDesc() testData.setup(d) return d, true, nil @@ -591,7 +590,7 @@ func TestBlocksStoreReplicationSet_GetClientsFor(t *testing.T) { defer services.StopAndAwaitTerminated(ctx, s) //nolint:errcheck // Wait until the ring client has initialised the state. - test.Poll(t, time.Second, true, func() interface{} { + test.Poll(t, time.Second, true, func() any { all, err := r.GetAllHealthy(ring.Read) return err == nil && len(all.Instances) > 0 }) @@ -629,7 +628,7 @@ func TestBlocksStoreReplicationSet_GetClientsFor_ShouldSupportRandomLoadBalancin ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil) t.Cleanup(func() { assert.NoError(t, closer.Close()) }) - require.NoError(t, ringStore.CAS(ctx, "test", func(in interface{}) (interface{}, bool, error) { + require.NoError(t, ringStore.CAS(ctx, "test", func(in any) (any, bool, error) { d := ring.NewDesc() for n := 1; n <= numInstances; n++ { d.AddIngester(fmt.Sprintf("instance-%d", n), fmt.Sprintf("127.0.0.%d", n), "", []uint32{uint32(n)}, ring.ACTIVE, registeredAt) @@ -653,7 +652,7 @@ func TestBlocksStoreReplicationSet_GetClientsFor_ShouldSupportRandomLoadBalancin defer services.StopAndAwaitTerminated(ctx, s) //nolint:errcheck // Wait until the ring client has initialised the state. - test.Poll(t, time.Second, true, func() interface{} { + test.Poll(t, time.Second, true, func() any { all, err := r.GetAllHealthy(ring.Read) return err == nil && len(all.Instances) > 0 }) @@ -662,7 +661,7 @@ func TestBlocksStoreReplicationSet_GetClientsFor_ShouldSupportRandomLoadBalancin // requests across store-gateways is balanced. distribution := map[string]int{} - for n := 0; n < numRuns; n++ { + for range numRuns { clients, err := s.GetClientsFor(userID, []ulid.ULID{block1}, nil, nil) require.NoError(t, err) require.Len(t, clients, 1) @@ -697,7 +696,7 @@ func TestBlocksStoreReplicationSet_GetClientsFor_ZoneAwareness(t *testing.T) { ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil) t.Cleanup(func() { assert.NoError(t, closer.Close()) }) - require.NoError(t, ringStore.CAS(ctx, "test", func(in interface{}) (interface{}, bool, error) { + require.NoError(t, ringStore.CAS(ctx, "test", func(in any) (any, bool, error) { d := ring.NewDesc() for n := 1; n <= numInstances; n++ { zone := strconv.Itoa((n-1)%3 + 1) @@ -722,14 +721,14 @@ func TestBlocksStoreReplicationSet_GetClientsFor_ZoneAwareness(t *testing.T) { defer services.StopAndAwaitTerminated(ctx, s) //nolint:errcheck // Wait until the ring client has initialised the state. 
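The counter-style loops in these tests become range-over-int loops, a Go 1.22 feature: for i := range n yields i = 0..n-1, and the index can be dropped entirely when unused, as in the for range numRuns hunk above:

package main

import "fmt"

func main() {
	// for i := range 3 iterates i = 0, 1, 2.
	for i := range 3 {
		fmt.Println(i)
	}
	// When the index is unused, drop it entirely.
	count := 0
	for range 5 {
		count++
	}
	fmt.Println(count) // 5
}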
- test.Poll(t, time.Second, true, func() interface{} { + test.Poll(t, time.Second, true, func() any { all, err := r.GetAllHealthy(ring.Read) return err == nil && len(all.Instances) > 0 }) // Target hit shouldn't exist in the blocksMap. targets := [3]int{3, 2, 1} - for i := 0; i < numRuns; i++ { + for i := range numRuns { blocksMap := [3]map[string]int{ {"1": 1, "2": 1}, {"1": 1, "3": 1}, diff --git a/pkg/querier/chunk_store_queryable_test.go b/pkg/querier/chunk_store_queryable_test.go index 1ecbb438d6..9c6c84363a 100644 --- a/pkg/querier/chunk_store_queryable_test.go +++ b/pkg/querier/chunk_store_queryable_test.go @@ -27,7 +27,7 @@ func makeMockChunks(t require.TestingT, numChunks int, enc encoding.Encoding, fr var ( chunks = make([]chunk.Chunk, 0, numChunks) ) - for i := 0; i < numChunks; i++ { + for range numChunks { c := util.GenerateChunk(t, sampleRate, from, int(samplesPerChunk), enc, additionalLabels...) chunks = append(chunks, c) from = from.Add(chunkOffset) diff --git a/pkg/querier/codec/protobuf_codec.go b/pkg/querier/codec/protobuf_codec.go index 64bfa2e394..8a6526f7db 100644 --- a/pkg/querier/codec/protobuf_codec.go +++ b/pkg/querier/codec/protobuf_codec.go @@ -5,6 +5,7 @@ import ( jsoniter "github.com/json-iterator/go" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/util/stats" v1 "github.com/prometheus/prometheus/web/api/v1" @@ -99,25 +100,27 @@ func getMatrixSampleStreams(data *v1.QueryData) *[]tripperware.SampleStream { sampleStreamsLen := len(data.Result.(promql.Matrix)) sampleStreams := make([]tripperware.SampleStream, sampleStreamsLen) - for i := 0; i < sampleStreamsLen; i++ { + for i := range sampleStreamsLen { sampleStream := data.Result.(promql.Matrix)[i] - labelsLen := len(sampleStream.Metric) - var labels []cortexpb.LabelAdapter + labelsLen := sampleStream.Metric.Len() + var lbls []cortexpb.LabelAdapter if labelsLen > 0 { - labels = make([]cortexpb.LabelAdapter, labelsLen) - for j := 0; j < labelsLen; j++ { - labels[j] = cortexpb.LabelAdapter{ - Name: sampleStream.Metric[j].Name, - Value: sampleStream.Metric[j].Value, + lbls = make([]cortexpb.LabelAdapter, labelsLen) + j := 0 + sampleStream.Metric.Range(func(l labels.Label) { + lbls[j] = cortexpb.LabelAdapter{ + Name: l.Name, + Value: l.Value, } - } + j++ + }) } samplesLen := len(sampleStream.Floats) var samples []cortexpb.Sample if samplesLen > 0 { samples = make([]cortexpb.Sample, samplesLen) - for j := 0; j < samplesLen; j++ { + for j := range samplesLen { samples[j] = cortexpb.Sample{ Value: sampleStream.Floats[j].F, TimestampMs: sampleStream.Floats[j].T, @@ -129,7 +132,7 @@ func getMatrixSampleStreams(data *v1.QueryData) *[]tripperware.SampleStream { var histograms []tripperware.SampleHistogramPair if histogramsLen > 0 { histograms = make([]tripperware.SampleHistogramPair, histogramsLen) - for j := 0; j < histogramsLen; j++ { + for j := range histogramsLen { bucketsLen := len(sampleStream.Histograms[j].H.NegativeBuckets) + len(sampleStream.Histograms[j].H.PositiveBuckets) if sampleStream.Histograms[j].H.ZeroCount > 0 { bucketsLen = len(sampleStream.Histograms[j].H.NegativeBuckets) + len(sampleStream.Histograms[j].H.PositiveBuckets) + 1 @@ -145,7 +148,7 @@ func getMatrixSampleStreams(data *v1.QueryData) *[]tripperware.SampleStream { } } } - sampleStreams[i] = tripperware.SampleStream{Labels: labels, Samples: samples, Histograms: histograms} + 
sampleStreams[i] = tripperware.SampleStream{Labels: lbls, Samples: samples, Histograms: histograms} } return &sampleStreams } @@ -154,20 +157,22 @@ func getVectorSamples(data *v1.QueryData, cortexInternal bool) *[]tripperware.Sa vectorSamplesLen := len(data.Result.(promql.Vector)) vectorSamples := make([]tripperware.Sample, vectorSamplesLen) - for i := 0; i < vectorSamplesLen; i++ { + for i := range vectorSamplesLen { sample := data.Result.(promql.Vector)[i] - labelsLen := len(sample.Metric) - var labels []cortexpb.LabelAdapter + labelsLen := sample.Metric.Len() + var lbls []cortexpb.LabelAdapter if labelsLen > 0 { - labels = make([]cortexpb.LabelAdapter, labelsLen) - for j := 0; j < labelsLen; j++ { - labels[j] = cortexpb.LabelAdapter{ - Name: sample.Metric[j].Name, - Value: sample.Metric[j].Value, + lbls = make([]cortexpb.LabelAdapter, labelsLen) + j := 0 + sample.Metric.Range(func(l labels.Label) { + lbls[j] = cortexpb.LabelAdapter{ + Name: l.Name, + Value: l.Value, } - } + j++ + }) } - vectorSamples[i].Labels = labels + vectorSamples[i].Labels = lbls // Float samples only. if sample.H == nil { @@ -238,7 +243,7 @@ func getBuckets(bucketsLen int, it histogram.BucketIterator[float64]) []*tripper func getStats(builtin *stats.BuiltinStats) *tripperware.PrometheusResponseSamplesStats { queryableSamplesStatsPerStepLen := len(builtin.Samples.TotalQueryableSamplesPerStep) queryableSamplesStatsPerStep := make([]*tripperware.PrometheusResponseQueryableSamplesStatsPerStep, queryableSamplesStatsPerStepLen) - for i := 0; i < queryableSamplesStatsPerStepLen; i++ { + for i := range queryableSamplesStatsPerStepLen { queryableSamplesStatsPerStep[i] = &tripperware.PrometheusResponseQueryableSamplesStatsPerStep{ Value: builtin.Samples.TotalQueryableSamplesPerStep[i].V, TimestampMs: builtin.Samples.TotalQueryableSamplesPerStep[i].T, diff --git a/pkg/querier/codec/protobuf_codec_test.go b/pkg/querier/codec/protobuf_codec_test.go index c7fee0ecba..44ebf6f173 100644 --- a/pkg/querier/codec/protobuf_codec_test.go +++ b/pkg/querier/codec/protobuf_codec_test.go @@ -170,10 +170,7 @@ func TestProtobufCodec_Encode(t *testing.T) { ResultType: parser.ValueTypeMatrix, Result: promql.Matrix{ promql.Series{ - Metric: labels.Labels{ - {Name: "__name__", Value: "foo"}, - {Name: "__job__", Value: "bar"}, - }, + Metric: labels.FromStrings("__name__", "foo", "__job__", "bar"), Floats: []promql.FPoint{ {F: 0.14, T: 18555000}, {F: 2.9, T: 18556000}, @@ -192,8 +189,8 @@ func TestProtobufCodec_Encode(t *testing.T) { SampleStreams: []tripperware.SampleStream{ { Labels: []cortexpb.LabelAdapter{ - {Name: "__name__", Value: "foo"}, {Name: "__job__", Value: "bar"}, + {Name: "__name__", Value: "foo"}, }, Samples: []cortexpb.Sample{ {Value: 0.14, TimestampMs: 18555000}, diff --git a/pkg/querier/distributor_queryable_test.go b/pkg/querier/distributor_queryable_test.go index bb7e20b7ba..825da08860 100644 --- a/pkg/querier/distributor_queryable_test.go +++ b/pkg/querier/distributor_queryable_test.go @@ -82,7 +82,6 @@ func TestDistributorQuerier_SelectShouldHonorQueryIngestersWithin(t *testing.T) for _, streamingMetadataEnabled := range []bool{false, true} { for testName, testData := range tests { - testData := testData t.Run(fmt.Sprintf("%s (streaming metadata enabled: %t)", testName, streamingMetadataEnabled), func(t *testing.T) { t.Parallel() @@ -191,13 +190,13 @@ func TestIngesterStreaming(t *testing.T) { require.True(t, seriesSet.Next()) series := seriesSet.At() - require.Equal(t, labels.Labels{{Name: "bar", Value: "baz"}}, 
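In the codec, sampleStream.Metric can no longer be indexed like a slice: the length comes from Metric.Len() and iteration goes through Metric.Range, which visits labels in sorted name order. That sorted order is also why the protobuf codec test fixture above now expects __job__ before __name__. A small sketch:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	metric := labels.FromStrings("__name__", "foo", "__job__", "bar")
	// Range visits labels in sorted name order: "__job__" < "__name__".
	out := make([]string, 0, metric.Len())
	metric.Range(func(l labels.Label) {
		out = append(out, l.Name+"="+l.Value)
	})
	fmt.Println(out) // [__job__=bar __name__=foo]
}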
series.Labels()) + require.Equal(t, labels.FromStrings("bar", "baz"), series.Labels()) chkIter := series.Iterator(nil) require.Equal(t, enc.ChunkValueType(), chkIter.Next()) require.True(t, seriesSet.Next()) series = seriesSet.At() - require.Equal(t, labels.Labels{{Name: "foo", Value: "bar"}}, series.Labels()) + require.Equal(t, labels.FromStrings("foo", "bar"), series.Labels()) chkIter = series.Iterator(chkIter) require.Equal(t, enc.ChunkValueType(), chkIter.Next()) diff --git a/pkg/querier/error_translate_queryable_test.go b/pkg/querier/error_translate_queryable_test.go index b1b3414909..daafd709a8 100644 --- a/pkg/querier/error_translate_queryable_test.go +++ b/pkg/querier/error_translate_queryable_test.go @@ -24,6 +24,8 @@ import ( "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/user" + "github.com/cortexproject/cortex/pkg/storegateway" + "github.com/cortexproject/cortex/pkg/util/limiter" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -113,6 +115,16 @@ func TestApiStatusCodes(t *testing.T) { expectedString: "test string", expectedCode: 422, }, + { + err: storegateway.ErrTooManyInflightRequests, + expectedString: "too many inflight requests in store gateway", + expectedCode: 500, + }, + { + err: limiter.ErrResourceLimitReached, + expectedString: limiter.ErrResourceLimitReachedStr, + expectedCode: 500, + }, } { for k, q := range map[string]storage.SampleAndChunkQueryable{ "error from queryable": errorTestQueryable{err: tc.err}, @@ -176,6 +188,9 @@ func createPrometheusAPI(q storage.SampleAndChunkQueryable, engine promql.QueryE false, false, false, + false, + 5*time.Minute, + false, ) promRouter := route.New().WithPrefix("/api/v1") diff --git a/pkg/querier/parquet_queryable.go b/pkg/querier/parquet_queryable.go index 8d7fe7152e..502a635534 100644 --- a/pkg/querier/parquet_queryable.go +++ b/pkg/querier/parquet_queryable.go @@ -3,16 +3,17 @@ package querier import ( "context" "fmt" + "strings" "time" "github.com/go-kit/log" - "github.com/go-kit/log/level" lru "github.com/hashicorp/golang-lru/v2" "github.com/opentracing/opentracing-go" "github.com/parquet-go/parquet-go" "github.com/pkg/errors" "github.com/prometheus-community/parquet-common/queryable" "github.com/prometheus-community/parquet-common/schema" + "github.com/prometheus-community/parquet-common/search" parquet_storage "github.com/prometheus-community/parquet-common/storage" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -20,17 +21,18 @@ import ( "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/util/annotations" + "github.com/thanos-io/thanos/pkg/store/storepb" "github.com/thanos-io/thanos/pkg/strutil" "golang.org/x/sync/errgroup" "github.com/cortexproject/cortex/pkg/cortexpb" + "github.com/cortexproject/cortex/pkg/querysharding" "github.com/cortexproject/cortex/pkg/storage/bucket" cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/limiter" - util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/multierror" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/validation" @@ -49,7 +51,9 @@ const ( parquetBlockStore blockStoreType = "parquet" ) -var validBlockStoreTypes = 
[]blockStoreType{tsdbBlockStore, parquetBlockStore} +var ( + validBlockStoreTypes = []blockStoreType{tsdbBlockStore, parquetBlockStore} +) // AddBlockStoreTypeToContext checks HTTP header and set block store key to context if // relevant header is set. @@ -90,6 +94,7 @@ func newParquetQueryableFallbackMetrics(reg prometheus.Registerer) *parquetQuery type parquetQueryableWithFallback struct { services.Service + fallbackDisabled bool queryStoreAfter time.Duration parquetQueryable storage.Queryable blockStorageQueryable *BlocksStoreQueryable @@ -153,38 +158,35 @@ func NewParquetQueryable( userID, _ := tenant.TenantID(ctx) return int64(limits.ParquetMaxFetchedDataBytes(userID)) }), - queryable.WithMaterializedSeriesCallback(func(ctx context.Context, cs []storage.ChunkSeries) error { + queryable.WithMaterializedLabelsFilterCallback(materializedLabelsFilterCallback), + queryable.WithMaterializedSeriesCallback(func(ctx context.Context, series storage.ChunkSeries) error { queryLimiter := limiter.QueryLimiterFromContextWithFallback(ctx) - lbls := make([][]cortexpb.LabelAdapter, 0, len(cs)) - for _, series := range cs { - chkCount := 0 - chunkSize := 0 - lblSize := 0 - lblAdapter := cortexpb.FromLabelsToLabelAdapters(series.Labels()) - lbls = append(lbls, lblAdapter) - for _, lbl := range lblAdapter { - lblSize += lbl.Size() - } - iter := series.Iterator(nil) - for iter.Next() { - chk := iter.At() - chunkSize += len(chk.Chunk.Bytes()) - chkCount++ - } - if chkCount > 0 { - if err := queryLimiter.AddChunks(chkCount); err != nil { - return validation.LimitError(err.Error()) - } - if err := queryLimiter.AddChunkBytes(chunkSize); err != nil { - return validation.LimitError(err.Error()) - } + chkCount := 0 + chunkSize := 0 + lblSize := 0 + lblAdapter := cortexpb.FromLabelsToLabelAdapters(series.Labels()) + for _, lbl := range lblAdapter { + lblSize += lbl.Size() + } + iter := series.Iterator(nil) + for iter.Next() { + chk := iter.At() + chunkSize += len(chk.Chunk.Bytes()) + chkCount++ + } + if chkCount > 0 { + if err := queryLimiter.AddChunks(chkCount); err != nil { + return validation.LimitError(err.Error()) } - - if err := queryLimiter.AddDataBytes(chunkSize + lblSize); err != nil { + if err := queryLimiter.AddChunkBytes(chunkSize); err != nil { return validation.LimitError(err.Error()) } } - if err := queryLimiter.AddSeries(lbls...); err != nil { + + if err := queryLimiter.AddDataBytes(chunkSize + lblSize); err != nil { + return validation.LimitError(err.Error()) + } + if err := queryLimiter.AddSeries(lblAdapter); err != nil { return validation.LimitError(err.Error()) } return nil @@ -253,6 +255,7 @@ func NewParquetQueryable( limits: limits, logger: logger, defaultBlockStoreType: blockStoreType(config.ParquetQueryableDefaultBlockStore), + fallbackDisabled: config.ParquetQueryableFallbackDisabled, } p.Service = services.NewBasicService(p.starting, p.running, p.stopping) @@ -305,6 +308,7 @@ func (p *parquetQueryableWithFallback) Querier(mint, maxt int64) (storage.Querie limits: p.limits, logger: p.logger, defaultBlockStoreType: p.defaultBlockStoreType, + fallbackDisabled: p.fallbackDisabled, }, nil } @@ -327,13 +331,15 @@ type parquetQuerierWithFallback struct { logger log.Logger defaultBlockStoreType blockStoreType + + fallbackDisabled bool } func (q *parquetQuerierWithFallback) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { span, ctx := opentracing.StartSpanFromContext(ctx, 
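WithMaterializedSeriesCallback now receives a single storage.ChunkSeries per invocation rather than a batch, so the limiter accounting (series, chunk count, chunk and label bytes) is charged as each series is materialized. A trimmed sketch of the chunk-accounting walk, assuming only the upstream Prometheus storage interfaces:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/storage"
)

// accountSeries tallies what the per-series callback charges against the
// query limiter: the number of chunks and their encoded byte size.
func accountSeries(s storage.ChunkSeries) (chunkCount, chunkBytes int) {
	it := s.Iterator(nil)
	for it.Next() {
		meta := it.At() // chunks.Meta
		chunkBytes += len(meta.Chunk.Bytes())
		chunkCount++
	}
	return chunkCount, chunkBytes
}

func main() {
	// A concrete ChunkSeries comes from the parquet materializer at query
	// time; accountSeries is what a callback would run per series.
	fmt.Println("wire accountSeries into the materialized-series callback")
}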
"parquetQuerierWithFallback.LabelValues") defer span.Finish() - remaining, parquet, err := q.getBlocks(ctx, q.minT, q.maxT) + remaining, parquet, err := q.getBlocks(ctx, q.minT, q.maxT, matchers) defer q.incrementOpsMetric("LabelValues", remaining, parquet) if err != nil { return nil, nil, err @@ -349,6 +355,10 @@ func (q *parquetQuerierWithFallback) LabelValues(ctx context.Context, name strin rAnnotations annotations.Annotations ) + if len(remaining) > 0 && q.fallbackDisabled { + return nil, nil, parquetConsistencyCheckError(remaining) + } + if len(parquet) > 0 { res, ann, qErr := q.parquetQuerier.LabelValues(InjectBlocksIntoContext(ctx, parquet...), name, hints, matchers...) if qErr != nil { @@ -382,7 +392,7 @@ func (q *parquetQuerierWithFallback) LabelNames(ctx context.Context, hints *stor span, ctx := opentracing.StartSpanFromContext(ctx, "parquetQuerierWithFallback.LabelNames") defer span.Finish() - remaining, parquet, err := q.getBlocks(ctx, q.minT, q.maxT) + remaining, parquet, err := q.getBlocks(ctx, q.minT, q.maxT, matchers) defer q.incrementOpsMetric("LabelNames", remaining, parquet) if err != nil { return nil, nil, err @@ -399,6 +409,10 @@ func (q *parquetQuerierWithFallback) LabelNames(ctx context.Context, hints *stor rAnnotations annotations.Annotations ) + if len(remaining) > 0 && q.fallbackDisabled { + return nil, nil, parquetConsistencyCheckError(remaining) + } + if len(parquet) > 0 { res, ann, qErr := q.parquetQuerier.LabelNames(InjectBlocksIntoContext(ctx, parquet...), hints, matchers...) if qErr != nil { @@ -432,18 +446,11 @@ func (q *parquetQuerierWithFallback) Select(ctx context.Context, sortSeries bool span, ctx := opentracing.StartSpanFromContext(ctx, "parquetQuerierWithFallback.Select") defer span.Finish() - userID, err := tenant.TenantID(ctx) + newMatchers, shardInfo, err := querysharding.ExtractShardingInfo(matchers) if err != nil { return storage.ErrSeriesSet(err) } - if q.limits.QueryVerticalShardSize(userID) > 1 { - uLogger := util_log.WithUserID(userID, q.logger) - level.Warn(uLogger).Log("msg", "parquet queryable enabled but vertical sharding > 1. Falling back to the block storage") - - return q.blocksStoreQuerier.Select(ctx, sortSeries, h, matchers...) - } - hints := storage.SelectHints{ Start: q.minT, End: q.maxT, @@ -463,13 +470,18 @@ func (q *parquetQuerierWithFallback) Select(ctx context.Context, sortSeries bool return storage.EmptySeriesSet() } - remaining, parquet, err := q.getBlocks(ctx, mint, maxt) + remaining, parquet, err := q.getBlocks(ctx, mint, maxt, matchers) defer q.incrementOpsMetric("Select", remaining, parquet) if err != nil { return storage.ErrSeriesSet(err) } + if len(remaining) > 0 && q.fallbackDisabled { + err = parquetConsistencyCheckError(remaining) + return storage.ErrSeriesSet(err) + } + // Lets sort the series to merge if len(parquet) > 0 && len(remaining) > 0 { sortSeries = true @@ -483,7 +495,11 @@ func (q *parquetQuerierWithFallback) Select(ctx context.Context, sortSeries bool go func() { span, _ := opentracing.StartSpanFromContext(ctx, "parquetQuerier.Select") defer span.Finish() - p <- q.parquetQuerier.Select(InjectBlocksIntoContext(ctx, parquet...), sortSeries, &hints, matchers...) + parquetCtx := InjectBlocksIntoContext(ctx, parquet...) + if shardInfo != nil { + parquetCtx = injectShardInfoIntoContext(parquetCtx, shardInfo) + } + p <- q.parquetQuerier.Select(parquetCtx, sortSeries, &hints, newMatchers...) 
}() } @@ -526,7 +542,7 @@ func (q *parquetQuerierWithFallback) Close() error { return mErr.Err() } -func (q *parquetQuerierWithFallback) getBlocks(ctx context.Context, minT, maxT int64) ([]*bucketindex.Block, []*bucketindex.Block, error) { +func (q *parquetQuerierWithFallback) getBlocks(ctx context.Context, minT, maxT int64, matchers []*labels.Matcher) ([]*bucketindex.Block, []*bucketindex.Block, error) { userID, err := tenant.TenantID(ctx) if err != nil { return nil, nil, err @@ -538,7 +554,7 @@ func (q *parquetQuerierWithFallback) getBlocks(ctx context.Context, minT, maxT i return nil, nil, nil } - blocks, _, err := q.finder.GetBlocks(ctx, userID, minT, maxT) + blocks, _, err := q.finder.GetBlocks(ctx, userID, minT, maxT, matchers) if err != nil { return nil, nil, err } @@ -570,6 +586,30 @@ func (q *parquetQuerierWithFallback) incrementOpsMetric(method string, remaining } } +type shardMatcherLabelsFilter struct { + shardMatcher *storepb.ShardMatcher +} + +func (f *shardMatcherLabelsFilter) Filter(lbls labels.Labels) bool { + return f.shardMatcher.MatchesLabels(lbls) +} + +func (f *shardMatcherLabelsFilter) Close() { + f.shardMatcher.Close() +} + +func materializedLabelsFilterCallback(ctx context.Context, _ *storage.SelectHints) (search.MaterializedLabelsFilter, bool) { + shardInfo, exists := extractShardInfoFromContext(ctx) + if !exists { + return nil, false + } + sm := shardInfo.Matcher(&querysharding.Buffers) + if !sm.IsSharded() { + return nil, false + } + return &shardMatcherLabelsFilter{shardMatcher: sm}, true +} + type cacheInterface[T any] interface { Get(path string) T Set(path string, reader T) @@ -655,3 +695,31 @@ func (n noopCache[T]) Get(_ string) (r T) { func (n noopCache[T]) Set(_ string, _ T) { } + +var ( + shardInfoCtxKey contextKey = 1 +) + +func injectShardInfoIntoContext(ctx context.Context, si *storepb.ShardInfo) context.Context { + return context.WithValue(ctx, shardInfoCtxKey, si) +} + +func extractShardInfoFromContext(ctx context.Context) (*storepb.ShardInfo, bool) { + if si := ctx.Value(shardInfoCtxKey); si != nil { + return si.(*storepb.ShardInfo), true + } + + return nil, false +} + +func parquetConsistencyCheckError(blocks []*bucketindex.Block) error { + return fmt.Errorf("consistency check failed because some blocks were not available as parquet files: %s", strings.Join(convertBlockULIDToString(blocks), " ")) +} + +func convertBlockULIDToString(blocks []*bucketindex.Block) []string { + res := make([]string, len(blocks)) + for idx, b := range blocks { + res[idx] = b.ID.String() + } + return res +} diff --git a/pkg/querier/parquet_queryable_test.go b/pkg/querier/parquet_queryable_test.go index 13cdde6cd5..e842a69dda 100644 --- a/pkg/querier/parquet_queryable_test.go +++ b/pkg/querier/parquet_queryable_test.go @@ -5,6 +5,8 @@ import ( "fmt" "math/rand" "path/filepath" + "strconv" + "sync" "testing" "time" @@ -48,11 +50,11 @@ func TestParquetQueryableFallbackLogic(t *testing.T) { maxT := util.TimeToMillis(time.Now()) createStore := func() *blocksStoreSetMock { - return &blocksStoreSetMock{mockedResponses: []interface{}{ + return &blocksStoreSetMock{mockedResponses: []any{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{{Name: labels.MetricName, Value: "fromSg"}}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(labels.MetricName, "fromSg"), 
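The shard info reaches materializedLabelsFilterCallback through the request context, using the usual unexported-key pattern so values set here cannot collide with other packages' context values even if they pick the same underlying integer. The pattern in isolation (shardInfo below is a local stand-in for storepb.ShardInfo):

package main

import (
	"context"
	"fmt"
)

// A private key type makes this key unequal to any key defined elsewhere.
type contextKey int

const shardInfoKey contextKey = 1

type shardInfo struct{ Index, Total int }

func inject(ctx context.Context, si *shardInfo) context.Context {
	return context.WithValue(ctx, shardInfoKey, si)
}

func extract(ctx context.Context) (*shardInfo, bool) {
	si, ok := ctx.Value(shardInfoKey).(*shardInfo)
	return si, ok
}

func main() {
	ctx := inject(context.Background(), &shardInfo{Index: 0, Total: 2})
	si, ok := extract(ctx)
	fmt.Println(ok, si.Index, si.Total) // true 0 2
}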
[]cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block1, block2), }, mockedLabelNamesResponse: &storepb.LabelNamesResponse{ @@ -75,49 +77,6 @@ func TestParquetQueryableFallbackLogic(t *testing.T) { } ctx := user.InjectOrgID(context.Background(), "user-1") - t.Run("should fallback when vertical sharding is enabled", func(t *testing.T) { - finder := &blocksFinderMock{} - stores := createStore() - - q := &blocksStoreQuerier{ - minT: minT, - maxT: maxT, - finder: finder, - stores: stores, - consistency: NewBlocksConsistencyChecker(0, 0, log.NewNopLogger(), nil), - logger: log.NewNopLogger(), - metrics: newBlocksStoreQueryableMetrics(prometheus.NewPedanticRegistry()), - limits: &blocksStoreLimitsMock{}, - - storeGatewayConsistencyCheckMaxAttempts: 3, - } - - mParquetQuerier := &mockParquetQuerier{} - pq := &parquetQuerierWithFallback{ - minT: minT, - maxT: maxT, - finder: finder, - blocksStoreQuerier: q, - parquetQuerier: mParquetQuerier, - metrics: newParquetQueryableFallbackMetrics(prometheus.NewRegistry()), - limits: defaultOverrides(t, 4), - logger: log.NewNopLogger(), - defaultBlockStoreType: parquetBlockStore, - } - - finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT).Return(bucketindex.Blocks{ - &bucketindex.Block{ID: block1, Parquet: &parquet.ConverterMarkMeta{Version: 1}}, - &bucketindex.Block{ID: block2, Parquet: &parquet.ConverterMarkMeta{Version: 1}}, - }, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), nil) - - t.Run("select", func(t *testing.T) { - ss := pq.Select(ctx, true, nil, matchers...) - require.NoError(t, ss.Err()) - require.Len(t, stores.queriedBlocks, 2) - require.Len(t, mParquetQuerier.queriedBlocks, 0) - }) - }) - t.Run("should fallback all blocks", func(t *testing.T) { finder := &blocksFinderMock{} stores := createStore() @@ -149,7 +108,7 @@ func TestParquetQueryableFallbackLogic(t *testing.T) { defaultBlockStoreType: parquetBlockStore, } - finder.On("GetBlocks", mock.Anything, "user-1", minT, mock.Anything).Return(bucketindex.Blocks{ + finder.On("GetBlocks", mock.Anything, "user-1", minT, mock.Anything, mock.Anything).Return(bucketindex.Blocks{ &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), nil) @@ -210,7 +169,7 @@ func TestParquetQueryableFallbackLogic(t *testing.T) { defaultBlockStoreType: parquetBlockStore, } - finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT).Return(bucketindex.Blocks{ + finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT, mock.Anything).Return(bucketindex.Blocks{ &bucketindex.Block{ID: block1, Parquet: &parquet.ConverterMarkMeta{Version: 1}}, &bucketindex.Block{ID: block2}, }, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), nil) @@ -279,7 +238,7 @@ func TestParquetQueryableFallbackLogic(t *testing.T) { defaultBlockStoreType: parquetBlockStore, } - finder.On("GetBlocks", mock.Anything, "user-1", minT, mock.Anything).Return(bucketindex.Blocks{ + finder.On("GetBlocks", mock.Anything, "user-1", minT, mock.Anything, mock.Anything).Return(bucketindex.Blocks{ &bucketindex.Block{ID: block1, Parquet: &parquet.ConverterMarkMeta{Version: 1}}, &bucketindex.Block{ID: block2, Parquet: &parquet.ConverterMarkMeta{Version: 1}}, }, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), nil) @@ -353,7 +312,7 @@ func TestParquetQueryableFallbackLogic(t *testing.T) { defaultBlockStoreType: tsdbBlockStore, } - finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT).Return(bucketindex.Blocks{ + 
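The removed subtest above ("should fallback when vertical sharding is enabled") is obsolete because Select now calls querysharding.ExtractShardingInfo to peel the synthetic shard matcher off the query, handing the remaining matchers to the parquet querier and the shard info to the context plumbing noted earlier. The general shape is sketched below; the shard label name and encoding are hypothetical, and the real logic lives in the querysharding package:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

// shardLabel is a hypothetical name for the synthetic matcher carrying
// shard info; it is not the Cortex constant.
const shardLabel = "__query_shard__"

// splitShardMatcher returns the matchers minus the shard matcher, plus the
// encoded shard value (if any).
func splitShardMatcher(ms []*labels.Matcher) (rest []*labels.Matcher, shard string) {
	for _, m := range ms {
		if m.Name == shardLabel && m.Type == labels.MatchEqual {
			shard = m.Value
			continue
		}
		rest = append(rest, m)
	}
	return rest, shard
}

func main() {
	ms := []*labels.Matcher{
		labels.MustNewMatcher(labels.MatchEqual, "__name__", "up"),
		labels.MustNewMatcher(labels.MatchEqual, shardLabel, "1_of_2"),
	}
	rest, shard := splitShardMatcher(ms)
	fmt.Println(len(rest), shard) // 1 1_of_2
}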
finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT, mock.Anything).Return(bucketindex.Blocks{ &bucketindex.Block{ID: block1, Parquet: &parquet.ConverterMarkMeta{Version: 1}}, &bucketindex.Block{ID: block2, Parquet: &parquet.ConverterMarkMeta{Version: 1}}, }, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), nil) @@ -456,11 +415,8 @@ func TestParquetQueryable_Limits(t *testing.T) { ctx := context.Background() seriesCount := 100 lbls := make([]labels.Labels, seriesCount) - for i := 0; i < seriesCount; i++ { - lbls[i] = labels.Labels{ - {Name: labels.MetricName, Value: metricName}, - {Name: "series", Value: fmt.Sprintf("%d", i)}, - } + for i := range seriesCount { + lbls[i] = labels.FromStrings(labels.MetricName, metricName, "series", strconv.Itoa(i)) } rnd := rand.New(rand.NewSource(time.Now().UnixNano())) @@ -477,7 +433,7 @@ func TestParquetQueryable_Limits(t *testing.T) { // Create a mocked bucket index blocks finder finder := &blocksFinderMock{} - finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT).Return(bucketindex.Blocks{ + finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT, mock.Anything).Return(bucketindex.Blocks{ &bucketindex.Block{ID: blockID, Parquet: &parquet.ConverterMarkMeta{Version: parquet.CurrentVersion}}, }, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), nil) @@ -522,7 +478,7 @@ func TestParquetQueryable_Limits(t *testing.T) { return validation.NewOverrides(limits, nil) }(), queryLimiter: limiter.NewQueryLimiter(0, 1, 0, 0), - expectedErr: fmt.Errorf("materializer failed to materialize chunks: would fetch too many chunk bytes: resource exhausted (used 1)"), + expectedErr: fmt.Errorf("materializer failed to create chunks iterator: failed to create column value iterator: would fetch too many chunk bytes: resource exhausted (used 1)"), }, "max chunk bytes per query limit hit": { limits: func() *validation.Overrides { @@ -541,7 +497,7 @@ func TestParquetQueryable_Limits(t *testing.T) { return validation.NewOverrides(limits, nil) }(), queryLimiter: limiter.NewQueryLimiter(0, 0, 0, 1), - expectedErr: fmt.Errorf("error materializing labels: materializer failed to materialize columns: would fetch too many data bytes: resource exhausted (used 1)"), + expectedErr: fmt.Errorf("error materializing labels: failed to get column indexes: failed to materialize column indexes: would fetch too many data bytes: resource exhausted (used 1)"), }, "limits within bounds - should succeed": { limits: func() *validation.Overrides { @@ -558,7 +514,6 @@ func TestParquetQueryable_Limits(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() @@ -671,3 +626,256 @@ func (m *mockParquetQuerier) Reset() { func (mockParquetQuerier) Close() error { return nil } + +func TestMaterializedLabelsFilterCallback(t *testing.T) { + tests := []struct { + name string + setupContext func() context.Context + expectedFilterReturned bool + expectedCallbackReturned bool + }{ + { + name: "no shard matcher in context", + setupContext: func() context.Context { + return context.Background() + }, + expectedFilterReturned: false, + expectedCallbackReturned: false, + }, + { + name: "shard matcher exists but is not sharded", + setupContext: func() context.Context { + // Create a ShardInfo with TotalShards = 0 (not sharded) + shardInfo := &storepb.ShardInfo{ + ShardIndex: 0, + TotalShards: 0, // Not sharded + By: true, + Labels: []string{"__name__"}, + } + + return injectShardInfoIntoContext(context.Background(), shardInfo) + }, + 
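Note that the expectedErr strings in this test had to change only because the upstream parquet-common library reworded its messages. Where a sentinel or typed error is exported, errors.Is assertions survive such rewording; a short sketch (errTooManyBytes is illustrative, not the library's exported error):

package main

import (
	"errors"
	"fmt"
)

var errTooManyBytes = errors.New("would fetch too many chunk bytes")

func materialize() error {
	return fmt.Errorf("materializer failed to create chunks iterator: %w", errTooManyBytes)
}

func main() {
	err := materialize()
	// Survives message rewording, unlike asserting on err.Error() text.
	fmt.Println(errors.Is(err, errTooManyBytes)) // true
}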
expectedFilterReturned: false, + expectedCallbackReturned: false, + }, + { + name: "shard matcher exists and is sharded", + setupContext: func() context.Context { + // Create a ShardInfo with TotalShards > 0 (sharded) + shardInfo := &storepb.ShardInfo{ + ShardIndex: 0, + TotalShards: 2, // Sharded + By: true, + Labels: []string{"__name__"}, + } + + return injectShardInfoIntoContext(context.Background(), shardInfo) + }, + expectedFilterReturned: true, + expectedCallbackReturned: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := tt.setupContext() + + filter, exists := materializedLabelsFilterCallback(ctx, nil) + + require.Equal(t, tt.expectedCallbackReturned, exists) + + if tt.expectedFilterReturned { + require.NotNil(t, filter) + + // Test that the filter can be used + testLabels := labels.FromStrings("__name__", "test_metric", "label1", "value1") + // We can't easily test the actual filtering logic without knowing the internal + // shard matching implementation, but we can at least verify the filter interface works + _ = filter.Filter(testLabels) + + // Cleanup + filter.Close() + } else { + require.Nil(t, filter) + } + }) + } +} + +func TestMaterializedLabelsFilterCallbackConcurrent(t *testing.T) { + wg := sync.WaitGroup{} + wg.Add(10) + si := &storepb.ShardInfo{ + ShardIndex: 0, + TotalShards: 2, + By: true, + Labels: []string{"__name__"}, + } + for range 10 { + go func() { + defer wg.Done() + ctx := injectShardInfoIntoContext(context.Background(), si) + filter, exists := materializedLabelsFilterCallback(ctx, nil) + require.Equal(t, true, exists) + for j := range 1000 { + filter.Filter(labels.FromStrings("__name__", "test_metric", "label_1", strconv.Itoa(j))) + } + filter.Close() + }() + } + wg.Wait() +} + +func TestParquetQueryableFallbackDisabled(t *testing.T) { + block1 := ulid.MustNew(1, nil) + block2 := ulid.MustNew(2, nil) + minT := int64(10) + maxT := util.TimeToMillis(time.Now()) + + createStore := func() *blocksStoreSetMock { + return &blocksStoreSetMock{mockedResponses: []any{ + map[BlocksStoreClient][]ulid.ULID{ + &storeGatewayClientMock{remoteAddr: "1.1.1.1", + mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse(labels.FromStrings(labels.MetricName, "fromSg"), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockHintsResponse(block1, block2), + }, + mockedLabelNamesResponse: &storepb.LabelNamesResponse{ + Names: namesFromSeries(labels.FromMap(map[string]string{labels.MetricName: "fromSg", "fromSg": "fromSg"})), + Warnings: []string{}, + Hints: mockNamesHints(block1, block2), + }, + mockedLabelValuesResponse: &storepb.LabelValuesResponse{ + Values: valuesFromSeries(labels.MetricName, labels.FromMap(map[string]string{labels.MetricName: "fromSg", "fromSg": "fromSg"})), + Warnings: []string{}, + Hints: mockValuesHints(block1, block2), + }, + }: {block1, block2}}, + }, + } + } + + matchers := []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, "fromSg"), + } + ctx := user.InjectOrgID(context.Background(), "user-1") + + t.Run("should return consistency check errors when fallback disabled and some blocks not available as parquet", func(t *testing.T) { + finder := &blocksFinderMock{} + stores := createStore() + + q := &blocksStoreQuerier{ + minT: minT, + maxT: maxT, + finder: finder, + stores: stores, + consistency: NewBlocksConsistencyChecker(0, 0, log.NewNopLogger(), nil), + logger: log.NewNopLogger(), + metrics: 
newBlocksStoreQueryableMetrics(prometheus.NewPedanticRegistry()), + limits: &blocksStoreLimitsMock{}, + + storeGatewayConsistencyCheckMaxAttempts: 3, + } + + mParquetQuerier := &mockParquetQuerier{} + pq := &parquetQuerierWithFallback{ + minT: minT, + maxT: maxT, + finder: finder, + blocksStoreQuerier: q, + parquetQuerier: mParquetQuerier, + queryStoreAfter: time.Hour, + metrics: newParquetQueryableFallbackMetrics(prometheus.NewRegistry()), + limits: defaultOverrides(t, 0), + logger: log.NewNopLogger(), + defaultBlockStoreType: parquetBlockStore, + fallbackDisabled: true, // Disable fallback + } + + // Set up blocks where block1 has parquet metadata but block2 doesn't + finder.On("GetBlocks", mock.Anything, "user-1", minT, mock.Anything, mock.Anything).Return(bucketindex.Blocks{ + &bucketindex.Block{ID: block1, Parquet: &parquet.ConverterMarkMeta{Version: 1}}, // Available as parquet + &bucketindex.Block{ID: block2}, // Not available as parquet + }, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), nil) + + expectedError := fmt.Sprintf("consistency check failed because some blocks were not available as parquet files: %s", block2.String()) + + t.Run("select should return consistency check error", func(t *testing.T) { + ss := pq.Select(ctx, true, nil, matchers...) + require.Error(t, ss.Err()) + require.Contains(t, ss.Err().Error(), expectedError) + }) + + t.Run("labelNames should return consistency check error", func(t *testing.T) { + _, _, err := pq.LabelNames(ctx, nil, matchers...) + require.Error(t, err) + require.Contains(t, err.Error(), expectedError) + }) + + t.Run("labelValues should return consistency check error", func(t *testing.T) { + _, _, err := pq.LabelValues(ctx, labels.MetricName, nil, matchers...) + require.Error(t, err) + require.Contains(t, err.Error(), expectedError) + }) + }) + + t.Run("should work normally when all blocks are available as parquet and fallback disabled", func(t *testing.T) { + finder := &blocksFinderMock{} + stores := createStore() + + q := &blocksStoreQuerier{ + minT: minT, + maxT: maxT, + finder: finder, + stores: stores, + consistency: NewBlocksConsistencyChecker(0, 0, log.NewNopLogger(), nil), + logger: log.NewNopLogger(), + metrics: newBlocksStoreQueryableMetrics(prometheus.NewPedanticRegistry()), + limits: &blocksStoreLimitsMock{}, + + storeGatewayConsistencyCheckMaxAttempts: 3, + } + + mParquetQuerier := &mockParquetQuerier{} + pq := &parquetQuerierWithFallback{ + minT: minT, + maxT: maxT, + finder: finder, + blocksStoreQuerier: q, + parquetQuerier: mParquetQuerier, + queryStoreAfter: time.Hour, + metrics: newParquetQueryableFallbackMetrics(prometheus.NewRegistry()), + limits: defaultOverrides(t, 0), + logger: log.NewNopLogger(), + defaultBlockStoreType: parquetBlockStore, + fallbackDisabled: true, // Disable fallback + } + + // Set up blocks where both blocks have parquet metadata + finder.On("GetBlocks", mock.Anything, "user-1", minT, mock.Anything, mock.Anything).Return(bucketindex.Blocks{ + &bucketindex.Block{ID: block1, Parquet: &parquet.ConverterMarkMeta{Version: 1}}, // Available as parquet + &bucketindex.Block{ID: block2, Parquet: &parquet.ConverterMarkMeta{Version: 1}}, // Available as parquet + }, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), nil) + + t.Run("select should work without error", func(t *testing.T) { + mParquetQuerier.Reset() + ss := pq.Select(ctx, true, nil, matchers...) 
+ require.NoError(t, ss.Err()) + require.Len(t, mParquetQuerier.queriedBlocks, 2) + }) + + t.Run("labelNames should work without error", func(t *testing.T) { + mParquetQuerier.Reset() + _, _, err := pq.LabelNames(ctx, nil, matchers...) + require.NoError(t, err) + require.Len(t, mParquetQuerier.queriedBlocks, 2) + }) + + t.Run("labelValues should work without error", func(t *testing.T) { + mParquetQuerier.Reset() + _, _, err := pq.LabelValues(ctx, labels.MetricName, nil, matchers...) + require.NoError(t, err) + require.Len(t, mParquetQuerier.queriedBlocks, 2) + }) + }) +} diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index ffe6c2e0b5..f84f07b967 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -92,16 +92,19 @@ type Config struct { EnablePromQLExperimentalFunctions bool `yaml:"enable_promql_experimental_functions"` // Query Parquet files if available - EnableParquetQueryable bool `yaml:"enable_parquet_queryable" doc:"hidden"` - ParquetQueryableShardCacheSize int `yaml:"parquet_queryable_shard_cache_size" doc:"hidden"` - ParquetQueryableDefaultBlockStore string `yaml:"parquet_queryable_default_block_store" doc:"hidden"` + EnableParquetQueryable bool `yaml:"enable_parquet_queryable"` + ParquetQueryableShardCacheSize int `yaml:"parquet_queryable_shard_cache_size"` + ParquetQueryableDefaultBlockStore string `yaml:"parquet_queryable_default_block_store"` + ParquetQueryableFallbackDisabled bool `yaml:"parquet_queryable_fallback_disabled"` + + DistributedExecEnabled bool `yaml:"distributed_exec_enabled" doc:"hidden"` } var ( errBadLookbackConfigs = errors.New("bad settings, query_store_after >= query_ingesters_within which can result in queries not being sent") errShuffleShardingLookbackLessThanQueryStoreAfter = errors.New("the shuffle-sharding lookback period should be greater or equal than the configured 'query store after'") errEmptyTimeRange = errors.New("empty time range") - errUnsupportedResponseCompression = errors.New("unsupported response compression. Supported compression 'gzip' and '' (disable compression)") + errUnsupportedResponseCompression = errors.New("unsupported response compression. Supported compression 'gzip', 'snappy', 'zstd' and '' (disable compression)") errInvalidConsistencyCheckAttempts = errors.New("store gateway consistency check max attempts should be greater or equal than 1") errInvalidIngesterQueryMaxAttempts = errors.New("ingester query max attempts should be greater or equal than 1") errInvalidParquetQueryableDefaultBlockStore = errors.New("unsupported parquet queryable default block store. Supported options are tsdb and parquet") @@ -128,7 +131,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.MaxSamples, "querier.max-samples", 50e6, "Maximum number of samples a single query can load into memory.") f.DurationVar(&cfg.QueryIngestersWithin, "querier.query-ingesters-within", 0, "Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester.") f.BoolVar(&cfg.EnablePerStepStats, "querier.per-step-stats-enabled", false, "Enable returning samples stats per steps in query response.") - f.StringVar(&cfg.ResponseCompression, "querier.response-compression", "gzip", "Use compression for metrics query API or instant and range query APIs. Supports 'gzip' and '' (disable compression)") + f.StringVar(&cfg.ResponseCompression, "querier.response-compression", "gzip", "Use compression for metrics query API or instant and range query APIs. 
Supported compression 'gzip', 'snappy', 'zstd' and '' (disable compression)") f.DurationVar(&cfg.MaxQueryIntoFuture, "querier.max-query-into-future", 10*time.Minute, "Maximum duration into the future you can query. 0 to disable.") f.DurationVar(&cfg.DefaultEvaluationInterval, "querier.default-evaluation-interval", time.Minute, "The default evaluation interval or step size for subqueries.") f.DurationVar(&cfg.QueryStoreAfter, "querier.query-store-after", 0, "The time after which a metric should be queried from storage and not just ingesters. 0 means all queries are sent to store. When running the blocks storage, if this option is enabled, the time range of the query sent to the store will be manipulated to ensure the query end is not more recent than 'now - query-store-after'.") @@ -143,8 +146,10 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.IgnoreMaxQueryLength, "querier.ignore-max-query-length", false, "If enabled, ignore max query length check at Querier select method. Users can choose to ignore it since the validation can be done before Querier evaluation like at Query Frontend or Ruler.") f.BoolVar(&cfg.EnablePromQLExperimentalFunctions, "querier.enable-promql-experimental-functions", false, "[Experimental] If true, experimental promQL functions are enabled.") f.BoolVar(&cfg.EnableParquetQueryable, "querier.enable-parquet-queryable", false, "[Experimental] If true, querier will try to query the parquet files if available.") - f.IntVar(&cfg.ParquetQueryableShardCacheSize, "querier.parquet-queryable-shard-cache-size", 512, "[Experimental] [Experimental] Maximum size of the Parquet queryable shard cache. 0 to disable.") - f.StringVar(&cfg.ParquetQueryableDefaultBlockStore, "querier.parquet-queryable-default-block-store", string(parquetBlockStore), "Parquet queryable's default block store to query. Valid options are tsdb and parquet. If it is set to tsdb, parquet queryable always fallback to store gateway.") + f.IntVar(&cfg.ParquetQueryableShardCacheSize, "querier.parquet-queryable-shard-cache-size", 512, "[Experimental] Maximum size of the Parquet queryable shard cache. 0 to disable.") + f.StringVar(&cfg.ParquetQueryableDefaultBlockStore, "querier.parquet-queryable-default-block-store", string(parquetBlockStore), "[Experimental] Parquet queryable's default block store to query. Valid options are tsdb and parquet. If it is set to tsdb, the parquet queryable always falls back to the store gateway.") + f.BoolVar(&cfg.DistributedExecEnabled, "querier.distributed-exec-enabled", false, "[Experimental] Enables distributed execution of queries by passing logical query plan fragments to downstream components.") + f.BoolVar(&cfg.ParquetQueryableFallbackDisabled, "querier.parquet-queryable-fallback-disabled", false, "[Experimental] Disable the Parquet queryable's fallback to the Store Gateway when a block is not available as Parquet files but is available in TSDB. Setting this to true disables the fallback, which allows the Store Gateway to be removed, 
but Parquet files must then be created before a block becomes queryable.") } // Validate the config @@ -156,7 +161,7 @@ func (cfg *Config) Validate() error { } } - if cfg.ResponseCompression != "" && cfg.ResponseCompression != "gzip" { + if cfg.ResponseCompression != "" && cfg.ResponseCompression != "gzip" && cfg.ResponseCompression != "snappy" && cfg.ResponseCompression != "zstd" { return errUnsupportedResponseCompression } @@ -200,7 +205,7 @@ func getChunksIteratorFunction(_ Config) chunkIteratorFunc { } // New builds a queryable and promql engine. -func New(cfg Config, limits *validation.Overrides, distributor Distributor, stores []QueryableWithFilter, reg prometheus.Registerer, logger log.Logger, isPartialDataEnabled partialdata.IsCfgEnabledFunc) (storage.SampleAndChunkQueryable, storage.ExemplarQueryable, promql.QueryEngine) { +func New(cfg Config, limits *validation.Overrides, distributor Distributor, stores []QueryableWithFilter, reg prometheus.Registerer, logger log.Logger, isPartialDataEnabled partialdata.IsCfgEnabledFunc) (storage.SampleAndChunkQueryable, storage.ExemplarQueryable, engine.QueryEngine) { iteratorFunc := getChunksIteratorFunction(cfg) distributorQueryable := newDistributorQueryable(distributor, cfg.IngesterMetadataStreaming, cfg.IngesterLabelNamesWithMatchers, iteratorFunc, cfg.QueryIngestersWithin, isPartialDataEnabled, cfg.IngesterQueryMaxAttempts) @@ -495,7 +500,6 @@ func (q querier) LabelValues(ctx context.Context, name string, hints *storage.La for _, querier := range queriers { // Need to reassign as the original variable will change and can't be relied on in a goroutine. - querier := querier g.Go(func() error { // NB: Values are sorted in Cortex already. myValues, myWarnings, err := querier.LabelValues(ctx, name, hints, matchers...) @@ -564,7 +568,6 @@ func (q querier) LabelNames(ctx context.Context, hints *storage.LabelHints, matc for _, querier := range queriers { // Need to reassign as the original variable will change and can't be relied on in a goroutine. - querier := querier g.Go(func() error { // NB: Names are sorted in Cortex already. myNames, myWarnings, err := querier.LabelNames(ctx, hints, matchers...) diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 06f44039a1..cf7855eafa 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -51,11 +51,11 @@ const ( type wrappedQuerier struct { storage.Querier - selectCallsArgs [][]interface{} + selectCallsArgs [][]any } func (q *wrappedQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - q.selectCallsArgs = append(q.selectCallsArgs, []interface{}{sortSeries, hints, matchers}) + q.selectCallsArgs = append(q.selectCallsArgs, []any{sortSeries, hints, matchers}) return q.Querier.Select(ctx, sortSeries, hints, matchers...) } @@ -120,11 +120,9 @@ var ( // Very simple single-point gets, with low step. Performance should be // similar to above. { - query: "foo", - step: sampleRate * 4, - labels: labels.Labels{ - labels.Label{Name: model.MetricNameLabel, Value: "foo"}, - }, + query: "foo", + step: sampleRate * 4, + labels: labels.FromStrings(labels.MetricName, "foo"), samples: func(from, through time.Time, step time.Duration) int { return int(through.Sub(from)/step) + 1 }, @@ -182,11 +180,9 @@ var ( // Single-point gets with large step; exercise Seek performance. 
{ - query: "foo", - step: sampleRate * 4 * 10, - labels: labels.Labels{ - labels.Label{Name: model.MetricNameLabel, Value: "foo"}, - }, + query: "foo", + step: sampleRate * 4 * 10, + labels: labels.FromStrings(labels.MetricName, "foo"), samples: func(from, through time.Time, step time.Duration) int { return int(through.Sub(from)/step) + 1 }, @@ -329,7 +325,6 @@ func TestShouldSortSeriesIfQueryingMultipleQueryables(t *testing.T) { for _, tc := range tCases { for _, thanosEngine := range []bool{false, true} { - thanosEngine := thanosEngine t.Run(tc.name+fmt.Sprintf("thanos engine: %t, encoding=%s", thanosEngine, enc.String()), func(t *testing.T) { wDistributorQueriable := &wrappedSampleAndChunkQueryable{QueryableWithFilter: tc.distributorQueryable} var wQueriables []QueryableWithFilter @@ -1674,7 +1669,6 @@ func TestConfig_Validate(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() cfg := &Config{} diff --git a/pkg/querier/series/series_set.go b/pkg/querier/series/series_set.go index 53a3ca4a1b..4aaf6f8930 100644 --- a/pkg/querier/series/series_set.go +++ b/pkg/querier/series/series_set.go @@ -195,17 +195,12 @@ func MetricsToSeriesSet(ctx context.Context, sortSeries bool, ms []model.Metric) } func metricToLabels(m model.Metric) labels.Labels { - ls := make(labels.Labels, 0, len(m)) + builder := labels.NewBuilder(labels.EmptyLabels()) for k, v := range m { - ls = append(ls, labels.Label{ - Name: string(k), - Value: string(v), - }) + builder.Set(string(k), string(v)) + } - // PromQL expects all labels to be sorted! In general, anyone constructing - // a labels.Labels list is responsible for sorting it during construction time. - sort.Sort(ls) - return ls + return builder.Labels() } type byLabels []storage.Series diff --git a/pkg/querier/series/series_set_test.go b/pkg/querier/series/series_set_test.go index 7e243a1444..cf82cb61fe 100644 --- a/pkg/querier/series/series_set_test.go +++ b/pkg/querier/series/series_set_test.go @@ -46,11 +46,5 @@ func TestMatrixToSeriesSetSortsMetricLabels(t *testing.T) { require.NoError(t, ss.Err()) l := ss.At().Labels() - require.Equal(t, labels.Labels{ - {Name: string(model.MetricNameLabel), Value: "testmetric"}, - {Name: "a", Value: "b"}, - {Name: "c", Value: "d"}, - {Name: "e", Value: "f"}, - {Name: "g", Value: "h"}, - }, l) + require.Equal(t, labels.FromStrings(labels.MetricName, "testmetric", "a", "b", "c", "d", "e", "f", "g", "h"), l) } diff --git a/pkg/querier/stats/stats.go b/pkg/querier/stats/stats.go index 127c422878..a834cd311e 100644 --- a/pkg/querier/stats/stats.go +++ b/pkg/querier/stats/stats.go @@ -101,7 +101,7 @@ func (s *QueryStats) AddFetchedSeries(series uint64) { atomic.AddUint64(&s.FetchedSeriesCount, series) } -func (s *QueryStats) AddExtraFields(fieldsVals ...interface{}) { +func (s *QueryStats) AddExtraFields(fieldsVals ...any) { if s == nil { return } @@ -124,15 +124,15 @@ func (s *QueryStats) AddExtraFields(fieldsVals ...interface{}) { } } -func (s *QueryStats) LoadExtraFields() []interface{} { +func (s *QueryStats) LoadExtraFields() []any { if s == nil { - return []interface{}{} + return []any{} } s.m.Lock() defer s.m.Unlock() - r := make([]interface{}, 0, len(s.ExtraFields)) + r := make([]any, 0, len(s.ExtraFields)) for k, v := range s.ExtraFields { r = append(r, k, v) } diff --git a/pkg/querier/stats/stats_test.go b/pkg/querier/stats/stats_test.go index 5f2e850aef..7908d06773 100644 --- a/pkg/querier/stats/stats_test.go +++ 
b/pkg/querier/stats/stats_test.go @@ -70,14 +70,14 @@ func TestQueryStats_AddExtraFields(t *testing.T) { stats.AddExtraFields("a", "b") stats.AddExtraFields("c") - checkExtraFields(t, []interface{}{"a", "b", "c", ""}, stats.LoadExtraFields()) + checkExtraFields(t, []any{"a", "b", "c", ""}, stats.LoadExtraFields()) }) t.Run("add and load extra fields nil receiver", func(t *testing.T) { var stats *QueryStats stats.AddExtraFields("a", "b") - checkExtraFields(t, []interface{}{}, stats.LoadExtraFields()) + checkExtraFields(t, []any{}, stats.LoadExtraFields()) }) } @@ -251,7 +251,7 @@ func TestStats_Merge(t *testing.T) { assert.Equal(t, uint64(105), stats1.LoadPeakSamples()) assert.Equal(t, uint64(401), stats1.LoadStoreGatewayTouchedPostings()) assert.Equal(t, uint64(601), stats1.LoadStoreGatewayTouchedPostingBytes()) - checkExtraFields(t, []interface{}{"a", "b", "c", "d"}, stats1.LoadExtraFields()) + checkExtraFields(t, []any{"a", "b", "c", "d"}, stats1.LoadExtraFields()) }) t.Run("merge two nil stats objects", func(t *testing.T) { @@ -265,11 +265,11 @@ func TestStats_Merge(t *testing.T) { assert.Equal(t, uint64(0), stats1.LoadFetchedSeries()) assert.Equal(t, uint64(0), stats1.LoadFetchedChunkBytes()) assert.Equal(t, uint64(0), stats1.LoadFetchedDataBytes()) - checkExtraFields(t, []interface{}{}, stats1.LoadExtraFields()) + checkExtraFields(t, []any{}, stats1.LoadExtraFields()) }) } -func checkExtraFields(t *testing.T, expected, actual []interface{}) { +func checkExtraFields(t *testing.T, expected, actual []any) { t.Parallel() assert.Equal(t, len(expected), len(actual)) expectedMap := map[string]string{} diff --git a/pkg/querier/stats_renderer_test.go b/pkg/querier/stats_renderer_test.go index 6f197b0165..0b8d591c2a 100644 --- a/pkg/querier/stats_renderer_test.go +++ b/pkg/querier/stats_renderer_test.go @@ -90,6 +90,9 @@ func Test_StatsRenderer(t *testing.T) { false, false, false, + false, + 5*time.Minute, + false, ) promRouter := route.New().WithPrefix("/api/v1") diff --git a/pkg/querier/store_gateway_client_test.go b/pkg/querier/store_gateway_client_test.go index 74d6c6f7df..34f7452817 100644 --- a/pkg/querier/store_gateway_client_test.go +++ b/pkg/querier/store_gateway_client_test.go @@ -42,7 +42,7 @@ func Test_newStoreGatewayClientFactory(t *testing.T) { reg := prometheus.NewPedanticRegistry() factory := newStoreGatewayClientFactory(cfg, reg) - for i := 0; i < 2; i++ { + for range 2 { client, err := factory(listener.Addr().String()) require.NoError(t, err) defer client.Close() //nolint:errcheck diff --git a/pkg/querier/tenantfederation/exemplar_merge_queryable.go b/pkg/querier/tenantfederation/exemplar_merge_queryable.go index a5f40ca59d..33e16ba276 100644 --- a/pkg/querier/tenantfederation/exemplar_merge_queryable.go +++ b/pkg/querier/tenantfederation/exemplar_merge_queryable.go @@ -144,7 +144,7 @@ func (m mergeExemplarQuerier) Select(start, end int64, matchers ...[]*labels.Mat // filter out tenants to query and unrelated matchers allMatchedTenantIds, allUnrelatedMatchers := filterAllTenantsAndMatchers(m.idLabelName, m.tenantIds, matchers) - jobs := make([]interface{}, len(allMatchedTenantIds)) + jobs := make([]any, len(allMatchedTenantIds)) results := make([][]exemplar.QueryResult, len(allMatchedTenantIds)) var jobPos int @@ -162,7 +162,7 @@ func (m mergeExemplarQuerier) Select(start, end int64, matchers ...[]*labels.Mat jobPos++ } - run := func(ctx context.Context, jobIntf interface{}) error { + run := func(ctx context.Context, jobIntf any) error { job, ok := jobIntf.(*exemplarSelectJob) 
if !ok { return fmt.Errorf("unexpected type %T", jobIntf) @@ -175,10 +175,7 @@ func (m mergeExemplarQuerier) Select(start, end int64, matchers ...[]*labels.Mat // append __tenant__ label to `seriesLabels` to identify each tenant for i, e := range res { - e.SeriesLabels = setLabelsRetainExisting(e.SeriesLabels, labels.Label{ - Name: m.idLabelName, - Value: job.id, - }) + e.SeriesLabels = setLabelsRetainExisting(e.SeriesLabels, labels.FromStrings(m.idLabelName, job.id)) res[i] = e } diff --git a/pkg/querier/tenantfederation/exemplar_merge_queryable_test.go b/pkg/querier/tenantfederation/exemplar_merge_queryable_test.go index bb48fc0f29..b52bf1b082 100644 --- a/pkg/querier/tenantfederation/exemplar_merge_queryable_test.go +++ b/pkg/querier/tenantfederation/exemplar_merge_queryable_test.go @@ -342,7 +342,7 @@ func Test_MergeExemplarQuerier_Select_WhenUseRegexResolver(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), regexResolver)) // wait update knownUsers test.Poll(t, time.Second*10, true, func() any { return testutil.ToFloat64(regexResolver.lastUpdateUserRun) > 0 && testutil.ToFloat64(regexResolver.discoveredUsers) == 2 }) diff --git a/pkg/querier/tenantfederation/merge_queryable.go b/pkg/querier/tenantfederation/merge_queryable.go index 71bf0e2531..3a69a6cc8d 100644 --- a/pkg/querier/tenantfederation/merge_queryable.go +++ b/pkg/querier/tenantfederation/merge_queryable.go @@ -244,7 +244,7 @@ type stringSliceFuncJob struct { // It doesn't require the output of the stringSliceFunc to be sorted, as results // of LabelValues are not sorted. func (m *mergeQuerier) mergeDistinctStringSliceWithTenants(ctx context.Context, f stringSliceFunc, tenants map[string]struct{}, ids []string, queriers []storage.Querier) ([]string, annotations.Annotations, error) { - var jobs []interface{} + var jobs []any for pos, id := range ids { if tenants != nil { @@ -260,7 +260,7 @@ func (m *mergeQuerier) mergeDistinctStringSliceWithTenants(ctx context.Context, } parentCtx := ctx - run := func(ctx context.Context, jobIntf interface{}) error { + run := func(ctx context.Context, jobIntf any) error { job, ok := jobIntf.(*stringSliceFuncJob) if !ok { return fmt.Errorf("unexpected type %T", jobIntf) @@ -339,7 +339,7 @@ func (m *mergeQuerier) Select(ctx context.Context, sortSeries bool, hints *stora log, ctx := spanlogger.New(ctx, "mergeQuerier.Select") defer log.Finish() matchedValues, filteredMatchers := filterValuesByMatchers(m.idLabelName, ids, matchers...) 
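// Fan-out: one selectJob is created per tenant that survives the matcher
// filtering; the jobs run concurrently, and each tenant's series set is
// wrapped so the tenant id label (idLabelName) is attached before merging.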
- var jobs = make([]interface{}, len(matchedValues)) + var jobs = make([]any, len(matchedValues)) var seriesSets = make([]storage.SeriesSet, len(matchedValues)) var jobPos int for labelPos := range ids { @@ -355,7 +355,7 @@ } parentCtx := ctx - run := func(ctx context.Context, jobIntf interface{}) error { + run := func(ctx context.Context, jobIntf any) error { job, ok := jobIntf.(*selectJob) if !ok { return fmt.Errorf("unexpected type %T", jobIntf) @@ -364,12 +364,7 @@ newCtx := user.InjectOrgID(parentCtx, job.id) seriesSets[job.pos] = &addLabelsSeriesSet{ upstream: job.querier.Select(newCtx, sortSeries, hints, filteredMatchers...), - labels: labels.Labels{ - { - Name: m.idLabelName, - Value: job.id, - }, - }, + labels: labels.FromStrings(m.idLabelName, job.id), } return nil } @@ -442,7 +437,7 @@ func (m *addLabelsSeriesSet) At() storage.Series { upstream := m.upstream.At() m.currSeries = &addLabelsSeries{ upstream: upstream, - labels: setLabelsRetainExisting(upstream.Labels(), m.labels...), + labels: setLabelsRetainExisting(upstream.Labels(), m.labels), } } return m.currSeries @@ -471,11 +466,11 @@ func rewriteLabelName(s string) string { } // this outputs a more readable error format -func labelsToString(labels labels.Labels) string { - parts := make([]string, len(labels)) - for pos, l := range labels { - parts[pos] = rewriteLabelName(l.Name) + " " + l.Value - } +func labelsToString(lbls labels.Labels) string { + parts := make([]string, 0, lbls.Len()) + lbls.Range(func(l labels.Label) { + parts = append(parts, rewriteLabelName(l.Name)+" "+l.Value) + }) return strings.Join(parts, ", ") } @@ -496,17 +491,17 @@ func (a *addLabelsSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator { // this sets a label and preserves an existing value in a new label prefixed with // original_. It doesn't do this recursively. 
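// For illustration (hypothetical values; the mappings below match the
// TestSetLabelsRetainExisting cases further down):
//   setLabelsRetainExisting({a="b"}, {a="c"}) => {a="c", original_a="b"}
//   setLabelsRetainExisting({a="b"}, {c="d"}) => {a="b", c="d"}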
-func setLabelsRetainExisting(src labels.Labels, additionalLabels ...labels.Label) labels.Labels { +func setLabelsRetainExisting(src labels.Labels, additionalLabels labels.Labels) labels.Labels { lb := labels.NewBuilder(src) - for _, additionalL := range additionalLabels { - if oldValue := src.Get(additionalL.Name); oldValue != "" { + for name, value := range additionalLabels.Map() { + if oldValue := src.Get(name); oldValue != "" { lb.Set( - retainExistingPrefix+additionalL.Name, + retainExistingPrefix+name, oldValue, ) } - lb.Set(additionalL.Name, additionalL.Value) + lb.Set(name, value) } return lb.Labels() diff --git a/pkg/querier/tenantfederation/merge_queryable_test.go b/pkg/querier/tenantfederation/merge_queryable_test.go index 8015ca2195..df1ed12468 100644 --- a/pkg/querier/tenantfederation/merge_queryable_test.go +++ b/pkg/querier/tenantfederation/merge_queryable_test.go @@ -492,24 +492,24 @@ func TestMergeQueryable_Select(t *testing.T) { matchers: []*labels.Matcher{{Name: defaultTenantLabel, Value: "team-b", Type: labels.MatchNotEqual}}, expectedSeriesCount: 4, expectedLabels: []labels.Labels{ - { - {Name: "__tenant_id__", Value: "team-a"}, - {Name: "instance", Value: "host1"}, - {Name: "tenant-team-a", Value: "static"}, - }, - { - {Name: "__tenant_id__", Value: "team-a"}, - {Name: "instance", Value: "host2.team-a"}, - }, - { - {Name: "__tenant_id__", Value: "team-c"}, - {Name: "instance", Value: "host1"}, - {Name: "tenant-team-c", Value: "static"}, - }, - { - {Name: "__tenant_id__", Value: "team-c"}, - {Name: "instance", Value: "host2.team-c"}, - }, + labels.FromStrings( + "__tenant_id__", "team-a", + "instance", "host1", + "tenant-team-a", "static", + ), + labels.FromStrings( + "__tenant_id__", "team-a", + "instance", "host2.team-a", + ), + labels.FromStrings( + "__tenant_id__", "team-c", + "instance", "host1", + "tenant-team-c", "static", + ), + labels.FromStrings( + "__tenant_id__", "team-c", + "instance", "host2.team-c", + ), }, expectedMetrics: expectedThreeTenantsMetrics, }, @@ -518,15 +518,15 @@ func TestMergeQueryable_Select(t *testing.T) { matchers: []*labels.Matcher{{Name: defaultTenantLabel, Value: "team-b", Type: labels.MatchEqual}}, expectedSeriesCount: 2, expectedLabels: []labels.Labels{ - { - {Name: "__tenant_id__", Value: "team-b"}, - {Name: "instance", Value: "host1"}, - {Name: "tenant-team-b", Value: "static"}, - }, - { - {Name: "__tenant_id__", Value: "team-b"}, - {Name: "instance", Value: "host2.team-b"}, - }, + labels.FromStrings( + "__tenant_id__", "team-b", + "instance", "host1", + "tenant-team-b", "static", + ), + labels.FromStrings( + "__tenant_id__", "team-b", + "instance", "host2.team-b", + ), }, expectedMetrics: expectedThreeTenantsMetrics, }, @@ -545,39 +545,39 @@ func TestMergeQueryable_Select(t *testing.T) { name: "should return all series when no matchers are provided", expectedSeriesCount: 6, expectedLabels: []labels.Labels{ - { - {Name: "__tenant_id__", Value: "team-a"}, - {Name: "instance", Value: "host1"}, - {Name: "original___tenant_id__", Value: "original-value"}, - {Name: "tenant-team-a", Value: "static"}, - }, - { - {Name: "__tenant_id__", Value: "team-a"}, - {Name: "instance", Value: "host2.team-a"}, - {Name: "original___tenant_id__", Value: "original-value"}, - }, - { - {Name: "__tenant_id__", Value: "team-b"}, - {Name: "instance", Value: "host1"}, - {Name: "original___tenant_id__", Value: "original-value"}, - {Name: "tenant-team-b", Value: "static"}, - }, - { - {Name: "__tenant_id__", Value: "team-b"}, - {Name: "instance", Value: 
"host2.team-b"}, - {Name: "original___tenant_id__", Value: "original-value"}, - }, - { - {Name: "__tenant_id__", Value: "team-c"}, - {Name: "instance", Value: "host1"}, - {Name: "original___tenant_id__", Value: "original-value"}, - {Name: "tenant-team-c", Value: "static"}, - }, - { - {Name: "__tenant_id__", Value: "team-c"}, - {Name: "instance", Value: "host2.team-c"}, - {Name: "original___tenant_id__", Value: "original-value"}, - }, + labels.FromStrings( + "__tenant_id__", "team-a", + "instance", "host1", + "original___tenant_id__", "original-value", + "tenant-team-a", "static", + ), + labels.FromStrings( + "__tenant_id__", "team-a", + "instance", "host2.team-a", + "original___tenant_id__", "original-value", + ), + labels.FromStrings( + "__tenant_id__", "team-b", + "instance", "host1", + "original___tenant_id__", "original-value", + "tenant-team-b", "static", + ), + labels.FromStrings( + "__tenant_id__", "team-b", + "instance", "host2.team-b", + "original___tenant_id__", "original-value", + ), + labels.FromStrings( + "__tenant_id__", "team-c", + "instance", "host1", + "original___tenant_id__", "original-value", + "tenant-team-c", "static", + ), + labels.FromStrings( + "__tenant_id__", "team-c", + "instance", "host2.team-c", + "original___tenant_id__", "original-value", + ), }, expectedMetrics: expectedThreeTenantsMetrics, }, @@ -599,17 +599,17 @@ func TestMergeQueryable_Select(t *testing.T) { matchers: []*labels.Matcher{{Name: defaultTenantLabel, Value: "team-b", Type: labels.MatchEqual}}, expectedSeriesCount: 2, expectedLabels: []labels.Labels{ - { - {Name: "__tenant_id__", Value: "team-b"}, - {Name: "instance", Value: "host1"}, - {Name: "original___tenant_id__", Value: "original-value"}, - {Name: "tenant-team-b", Value: "static"}, - }, - { - {Name: "__tenant_id__", Value: "team-b"}, - {Name: "instance", Value: "host2.team-b"}, - {Name: "original___tenant_id__", Value: "original-value"}, - }, + labels.FromStrings( + "__tenant_id__", "team-b", + "instance", "host1", + "original___tenant_id__", "original-value", + "tenant-team-b", "static", + ), + labels.FromStrings( + "__tenant_id__", "team-b", + "instance", "host2.team-b", + "original___tenant_id__", "original-value", + ), }, expectedMetrics: expectedThreeTenantsMetrics, }, @@ -654,11 +654,9 @@ func TestMergeQueryable_Select(t *testing.T) { }}, }, } { - scenario := scenario t.Run(scenario.name, func(t *testing.T) { for _, useRegexResolver := range []bool{true, false} { for _, tc := range scenario.selectTestCases { - tc := tc t.Run(fmt.Sprintf("%s, useRegexResolver: %v", tc.name, useRegexResolver), func(t *testing.T) { ctx := context.Background() if useRegexResolver { @@ -686,7 +684,7 @@ func TestMergeQueryable_Select(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), regexResolver)) // wait update knownUsers - test.Poll(t, time.Second*10, true, func() interface{} { + test.Poll(t, time.Second*10, true, func() any { return testutil.ToFloat64(regexResolver.lastUpdateUserRun) > 0 && testutil.ToFloat64(regexResolver.discoveredUsers) == float64(len(scenario.tenants)) }) @@ -857,7 +855,6 @@ func TestMergeQueryable_LabelNames(t *testing.T) { }, }, } { - scenario := scenario for _, useRegexResolver := range []bool{true, false} { t.Run(fmt.Sprintf("%s, useRegexResolver: %v", scenario.mergeQueryableScenario.name, useRegexResolver), func(t *testing.T) { ctx := context.Background() @@ -885,7 +882,7 @@ func TestMergeQueryable_LabelNames(t *testing.T) { require.NoError(t, 
services.StartAndAwaitRunning(context.Background(), regexResolver)) // wait update knownUsers - test.Poll(t, time.Second*10, true, func() interface{} { + test.Poll(t, time.Second*10, true, func() any { return testutil.ToFloat64(regexResolver.lastUpdateUserRun) > 0 && testutil.ToFloat64(regexResolver.discoveredUsers) == float64(len(scenario.tenants)) }) @@ -1093,7 +1090,6 @@ func TestMergeQueryable_LabelValues(t *testing.T) { }}, }, } { - scenario := scenario t.Run(scenario.name, func(t *testing.T) { for _, useRegexResolver := range []bool{true, false} { for _, tc := range scenario.labelValuesTestCases { @@ -1123,7 +1119,7 @@ func TestMergeQueryable_LabelValues(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), regexResolver)) // wait update knownUsers - test.Poll(t, time.Second*10, true, func() interface{} { + test.Poll(t, time.Second*10, true, func() any { return testutil.ToFloat64(regexResolver.lastUpdateUserRun) > 0 && testutil.ToFloat64(regexResolver.discoveredUsers) == float64(len(scenario.tenants)) }) @@ -1178,33 +1174,33 @@ func TestSetLabelsRetainExisting(t *testing.T) { }{ // Test adding labels at the end. { - labels: labels.Labels{{Name: "a", Value: "b"}}, - additionalLabels: labels.Labels{{Name: "c", Value: "d"}}, - expected: labels.Labels{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}}, + labels: labels.FromStrings("a", "b"), + additionalLabels: labels.FromStrings("c", "d"), + expected: labels.FromStrings("a", "b", "c", "d"), }, // Test adding labels at the beginning. { - labels: labels.Labels{{Name: "c", Value: "d"}}, - additionalLabels: labels.Labels{{Name: "a", Value: "b"}}, - expected: labels.Labels{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}}, + labels: labels.FromStrings("c", "d"), + additionalLabels: labels.FromStrings("a", "b"), + expected: labels.FromStrings("a", "b", "c", "d"), }, // Test we do override existing labels and expose the original value. { - labels: labels.Labels{{Name: "a", Value: "b"}}, - additionalLabels: labels.Labels{{Name: "a", Value: "c"}}, - expected: labels.Labels{{Name: "a", Value: "c"}, {Name: "original_a", Value: "b"}}, + labels: labels.FromStrings("a", "b"), + additionalLabels: labels.FromStrings("a", "c"), + expected: labels.FromStrings("a", "c", "original_a", "b"), }, // Test we do override existing labels but don't do it recursively. 
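// i.e. a pre-existing original_a value is simply overwritten with the
// current value of a; it is never stacked into original_original_a.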
{ - labels: labels.Labels{{Name: "a", Value: "b"}, {Name: "original_a", Value: "i am lost"}}, - additionalLabels: labels.Labels{{Name: "a", Value: "d"}}, - expected: labels.Labels{{Name: "a", Value: "d"}, {Name: "original_a", Value: "b"}}, + labels: labels.FromStrings("a", "b", "original_a", "i am lost"), + additionalLabels: labels.FromStrings("a", "d"), + expected: labels.FromStrings("a", "d", "original_a", "b"), }, } { - assert.Equal(t, tc.expected, setLabelsRetainExisting(tc.labels, tc.additionalLabels...)) + assert.Equal(t, tc.expected, setLabelsRetainExisting(tc.labels, tc.additionalLabels)) } } @@ -1263,7 +1259,7 @@ func containsTags(span *mocktracer.MockSpan, expectedTag expectedTag) bool { type spanWithTags struct { name string - tags map[string]interface{} + tags map[string]any } type expectedTag struct { diff --git a/pkg/querier/tenantfederation/metadata_merge_querier.go b/pkg/querier/tenantfederation/metadata_merge_querier.go index 7f796c2b39..37e5a63f5e 100644 --- a/pkg/querier/tenantfederation/metadata_merge_querier.go +++ b/pkg/querier/tenantfederation/metadata_merge_querier.go @@ -61,7 +61,7 @@ func (m *mergeMetadataQuerier) MetricsMetadata(ctx context.Context, req *client. return m.upstream.MetricsMetadata(ctx, req) } - jobs := make([]interface{}, len(tenantIds)) + jobs := make([]any, len(tenantIds)) results := make([][]scrape.MetricMetadata, len(tenantIds)) var jobPos int @@ -74,7 +74,7 @@ func (m *mergeMetadataQuerier) MetricsMetadata(ctx context.Context, req *client. jobPos++ } - run := func(ctx context.Context, jobIntf interface{}) error { + run := func(ctx context.Context, jobIntf any) error { job, ok := jobIntf.(*metadataSelectJob) if !ok { return fmt.Errorf("unexpected type %T", jobIntf) diff --git a/pkg/querier/tenantfederation/metadata_merge_querier_test.go b/pkg/querier/tenantfederation/metadata_merge_querier_test.go index 95ba851543..c04e4e3c0b 100644 --- a/pkg/querier/tenantfederation/metadata_merge_querier_test.go +++ b/pkg/querier/tenantfederation/metadata_merge_querier_test.go @@ -176,7 +176,7 @@ func Test_mergeMetadataQuerier_MetricsMetadata_WhenUseRegexResolver(t *testing.T require.NoError(t, services.StartAndAwaitRunning(context.Background(), regexResolver)) // wait update knownUsers - test.Poll(t, time.Second*10, true, func() interface{} { + test.Poll(t, time.Second*10, true, func() any { return testutil.ToFloat64(regexResolver.lastUpdateUserRun) > 0 && testutil.ToFloat64(regexResolver.discoveredUsers) == 2 }) diff --git a/pkg/querier/tenantfederation/regex_resolver_test.go b/pkg/querier/tenantfederation/regex_resolver_test.go index e178d91d8a..03735e8b1c 100644 --- a/pkg/querier/tenantfederation/regex_resolver_test.go +++ b/pkg/querier/tenantfederation/regex_resolver_test.go @@ -96,7 +96,7 @@ func Test_RegexResolver(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), regexResolver)) // wait update knownUsers - test.Poll(t, time.Second*10, true, func() interface{} { + test.Poll(t, time.Second*10, true, func() any { return testutil.ToFloat64(regexResolver.lastUpdateUserRun) > 0 && testutil.ToFloat64(regexResolver.discoveredUsers) == float64(len(tc.existingTenants)) }) diff --git a/pkg/querier/testutils.go b/pkg/querier/testutils.go index a032e545dd..4ac69988bf 100644 --- a/pkg/querier/testutils.go +++ b/pkg/querier/testutils.go @@ -142,7 +142,7 @@ func ConvertToChunks(t *testing.T, samples []cortexpb.Sample, histograms []*cort } } - c := chunk.NewChunk(nil, chk, model.Time(samples[0].TimestampMs), 
model.Time(samples[len(samples)-1].TimestampMs)) + c := chunk.NewChunk(labels.EmptyLabels(), chk, model.Time(samples[0].TimestampMs), model.Time(samples[len(samples)-1].TimestampMs)) clientChunks, err := chunkcompat.ToChunks([]chunk.Chunk{c}) require.NoError(t, err) diff --git a/pkg/querier/tripperware/distributed_query.go b/pkg/querier/tripperware/distributed_query.go index 02a0692153..132b4989db 100644 --- a/pkg/querier/tripperware/distributed_query.go +++ b/pkg/querier/tripperware/distributed_query.go @@ -15,12 +15,13 @@ const ( stepBatch = 10 ) -func DistributedQueryMiddleware(defaultEvaluationInterval time.Duration, lookbackDelta time.Duration) Middleware { +func DistributedQueryMiddleware(defaultEvaluationInterval time.Duration, lookbackDelta time.Duration, optimizers []logicalplan.Optimizer) Middleware { return MiddlewareFunc(func(next Handler) Handler { return distributedQueryMiddleware{ next: next, lookbackDelta: lookbackDelta, defaultEvaluationInterval: defaultEvaluationInterval, + optimizers: optimizers, } }) } @@ -36,6 +37,7 @@ type distributedQueryMiddleware struct { next Handler defaultEvaluationInterval time.Duration lookbackDelta time.Duration + optimizers []logicalplan.Optimizer } func (d distributedQueryMiddleware) newLogicalPlan(qs string, start time.Time, end time.Time, step time.Duration) (*logicalplan.Plan, error) { @@ -64,8 +66,11 @@ func (d distributedQueryMiddleware) newLogicalPlan(qs string, start time.Time, e DisableDuplicateLabelCheck: false, } - logicalPlan := logicalplan.NewFromAST(expr, &qOpts, planOpts) - optimizedPlan, _ := logicalPlan.Optimize(logicalplan.DefaultOptimizers) + logicalPlan, err := logicalplan.NewFromAST(expr, &qOpts, planOpts) + if err != nil { + return nil, err + } + optimizedPlan, _ := logicalPlan.Optimize(d.optimizers) return &optimizedPlan, nil } diff --git a/pkg/querier/tripperware/distributed_query_test.go b/pkg/querier/tripperware/distributed_query_test.go index d11a3dfbba..17b3dd644e 100644 --- a/pkg/querier/tripperware/distributed_query_test.go +++ b/pkg/querier/tripperware/distributed_query_test.go @@ -7,6 +7,7 @@ import ( "time" "github.com/stretchr/testify/require" + "github.com/thanos-io/promql-engine/logicalplan" ) func TestLogicalPlanGeneration(t *testing.T) { @@ -117,11 +118,10 @@ func TestLogicalPlanGeneration(t *testing.T) { } for i, tc := range testCases { - tc := tc t.Run(strconv.Itoa(i)+"_"+tc.name, func(t *testing.T) { t.Parallel() - middleware := DistributedQueryMiddleware(time.Minute, 5*time.Minute) + middleware := DistributedQueryMiddleware(time.Minute, 5*time.Minute, logicalplan.DefaultOptimizers) handler := middleware.Wrap(HandlerFunc(func(_ context.Context, req Request) (Response, error) { return nil, nil diff --git a/pkg/querier/tripperware/instantquery/instant_query.go b/pkg/querier/tripperware/instantquery/instant_query.go index a397720719..4c3e610263 100644 --- a/pkg/querier/tripperware/instantquery/instant_query.go +++ b/pkg/querier/tripperware/instantquery/instant_query.go @@ -22,6 +22,7 @@ import ( "github.com/cortexproject/cortex/pkg/querier/tripperware" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/limiter" + "github.com/cortexproject/cortex/pkg/util/requestmeta" "github.com/cortexproject/cortex/pkg/util/spanlogger" "github.com/thanos-io/promql-engine/logicalplan" @@ -47,8 +48,15 @@ type instantQueryCodec struct { func NewInstantQueryCodec(compressionStr string, defaultCodecTypeStr string) instantQueryCodec { compression := tripperware.NonCompression // default - if 
compressionStr == string(tripperware.GzipCompression) { + switch compressionStr { + case string(tripperware.GzipCompression): compression = tripperware.GzipCompression + + case string(tripperware.SnappyCompression): + compression = tripperware.SnappyCompression + + case string(tripperware.ZstdCompression): + compression = tripperware.ZstdCompression } defaultCodecType := tripperware.JsonCodecType // default @@ -75,8 +83,7 @@ func (c instantQueryCodec) DecodeRequest(_ context.Context, r *http.Request, for result.Stats = r.FormValue("stats") result.Path = r.URL.Path - isSourceRuler := strings.Contains(r.Header.Get("User-Agent"), tripperware.RulerUserAgent) - if isSourceRuler { + if tripperware.GetSource(r) == requestmeta.SourceRuler { // When the source is the Ruler, forward all headers result.Headers = r.Header } else { @@ -102,13 +109,31 @@ func (c instantQueryCodec) DecodeResponse(ctx context.Context, r *http.Response, return nil, err } + responseSizeHeader := r.Header.Get("X-Uncompressed-Length") responseSizeLimiter := limiter.ResponseSizeLimiterFromContextWithFallback(ctx) - body, err := tripperware.BodyBytes(r, responseSizeLimiter, log) + responseSize, hasSizeHeader, err := tripperware.ParseResponseSizeHeader(responseSizeHeader) + if err != nil { + log.Error(err) + return nil, err + } + if hasSizeHeader { + if err := responseSizeLimiter.AddResponseBytes(responseSize); err != nil { + return nil, httpgrpc.Errorf(http.StatusUnprocessableEntity, "%s", err.Error()) + } + } + + body, err := tripperware.BodyBytes(r, log) if err != nil { log.Error(err) return nil, err } + if !hasSizeHeader { + if err := responseSizeLimiter.AddResponseBytes(len(body)); err != nil { + return nil, httpgrpc.Errorf(http.StatusUnprocessableEntity, "%s", err.Error()) + } + } + if r.StatusCode/100 != 2 { return nil, httpgrpc.Errorf(r.StatusCode, "%s", string(body)) } @@ -183,24 +208,27 @@ } } - h.Add("Content-Type", "application/json") + h.Add("Content-Type", "application/x-www-form-urlencoded") - isSourceRuler := strings.Contains(h.Get("User-Agent"), tripperware.RulerUserAgent) + isSourceRuler := strings.Contains(h.Get("User-Agent"), tripperware.RulerUserAgent) || requestmeta.RequestFromRuler(ctx) if !isSourceRuler { // When the source is the Ruler, skip setting the headers tripperware.SetRequestHeaders(h, c.defaultCodecType, c.compression) } - byteBody, err := c.getSerializedBody(promReq) + bodyBytes, err := c.getSerializedBody(promReq) if err != nil { return nil, err } + form := url.Values{} + form.Set("plan", string(bodyBytes)) + formEncoded := form.Encode() req := &http.Request{ Method: "POST", RequestURI: u.String(), // This is what the httpgrpc code looks at. 
URL: u, - Body: io.NopCloser(bytes.NewReader(byteBody)), + Body: io.NopCloser(strings.NewReader(formEncoded)), Header: h, } diff --git a/pkg/querier/tripperware/instantquery/instant_query_middlewares.go b/pkg/querier/tripperware/instantquery/instant_query_middlewares.go index a74a9ad147..4346421112 100644 --- a/pkg/querier/tripperware/instantquery/instant_query_middlewares.go +++ b/pkg/querier/tripperware/instantquery/instant_query_middlewares.go @@ -4,8 +4,10 @@ import ( "time" "github.com/go-kit/log" + "github.com/thanos-io/promql-engine/logicalplan" "github.com/thanos-io/thanos/pkg/querysharding" + "github.com/cortexproject/cortex/pkg/distributed_execution" "github.com/cortexproject/cortex/pkg/querier/tripperware" ) @@ -17,6 +19,7 @@ func Middlewares( lookbackDelta time.Duration, defaultEvaluationInterval time.Duration, distributedExecEnabled bool, + localOptimizers []logicalplan.Optimizer, ) ([]tripperware.Middleware, error) { m := []tripperware.Middleware{ NewLimitsMiddleware(limits, lookbackDelta), @@ -25,7 +28,8 @@ func Middlewares( if distributedExecEnabled { m = append(m, - tripperware.DistributedQueryMiddleware(defaultEvaluationInterval, lookbackDelta)) + tripperware.DistributedQueryMiddleware(defaultEvaluationInterval, lookbackDelta, + append(localOptimizers, &distributed_execution.DistributedOptimizer{}))) } return m, nil diff --git a/pkg/querier/tripperware/instantquery/instant_query_middlewares_test.go b/pkg/querier/tripperware/instantquery/instant_query_middlewares_test.go index 122f064562..6646ec1bc0 100644 --- a/pkg/querier/tripperware/instantquery/instant_query_middlewares_test.go +++ b/pkg/querier/tripperware/instantquery/instant_query_middlewares_test.go @@ -62,6 +62,7 @@ func TestRoundTrip(t *testing.T) { 5*time.Minute, time.Minute, false, + logicalplan.DefaultOptimizers, ) require.NoError(t, err) @@ -174,6 +175,7 @@ func TestRoundTripWithAndWithoutDistributedExec(t *testing.T) { 5*time.Minute, time.Minute, tc.distributedEnabled, + logicalplan.DefaultOptimizers, ) require.NoError(t, err) @@ -212,8 +214,7 @@ func TestRoundTripWithAndWithoutDistributedExec(t *testing.T) { require.NoError(t, err) // check request body - body, err := io.ReadAll(req.Body) - require.NoError(t, err) + body := []byte(req.PostFormValue("plan")) if tc.expectEmptyBody { require.Empty(t, body) } else { diff --git a/pkg/querier/tripperware/instantquery/instant_query_test.go b/pkg/querier/tripperware/instantquery/instant_query_test.go index 6aa4e79784..2e56eecd68 100644 --- a/pkg/querier/tripperware/instantquery/instant_query_test.go +++ b/pkg/querier/tripperware/instantquery/instant_query_test.go @@ -90,7 +90,6 @@ func TestRequest(t *testing.T) { }, }, } { - tc := tc t.Run(tc.url, func(t *testing.T) { t.Parallel() r, err := http.NewRequest("POST", tc.url, http.NoBody) @@ -434,7 +433,6 @@ func TestResponse(t *testing.T) { }, }, } { - tc := tc t.Run(strconv.Itoa(i), func(t *testing.T) { t.Parallel() var response *http.Response @@ -709,7 +707,6 @@ func TestMergeResponse(t *testing.T) { cancelBeforeMerge: true, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() ctx, cancelCtx := context.WithCancel(user.InjectOrgID(context.Background(), "1")) @@ -1722,7 +1719,6 @@ func TestMergeResponseProtobuf(t *testing.T) { cancelBeforeMerge: true, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() ctx, cancelCtx := context.WithCancel(user.InjectOrgID(context.Background(), "1")) @@ -1821,7 +1817,7 @@ func Benchmark_Decode(b *testing.B) { maxSamplesCount := 1000000 samples := 
make([]tripperware.SampleStream, maxSamplesCount) - for i := 0; i < maxSamplesCount; i++ { + for i := range maxSamplesCount { samples[i].Labels = append(samples[i].Labels, cortexpb.LabelAdapter{Name: fmt.Sprintf("Sample%v", i), Value: fmt.Sprintf("Value%v", i)}) samples[i].Labels = append(samples[i].Labels, cortexpb.LabelAdapter{Name: fmt.Sprintf("Sample2%v", i), Value: fmt.Sprintf("Value2%v", i)}) samples[i].Labels = append(samples[i].Labels, cortexpb.LabelAdapter{Name: fmt.Sprintf("Sample3%v", i), Value: fmt.Sprintf("Value3%v", i)}) @@ -1864,10 +1860,9 @@ func Benchmark_Decode(b *testing.B) { body, err := json.Marshal(r) require.NoError(b, err) - b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { response := &http.Response{ StatusCode: 200, Body: io.NopCloser(bytes.NewBuffer(body)), @@ -1885,7 +1880,7 @@ func Benchmark_Decode_Protobuf(b *testing.B) { maxSamplesCount := 1000000 samples := make([]tripperware.SampleStream, maxSamplesCount) - for i := 0; i < maxSamplesCount; i++ { + for i := range maxSamplesCount { samples[i].Labels = append(samples[i].Labels, cortexpb.LabelAdapter{Name: fmt.Sprintf("Sample%v", i), Value: fmt.Sprintf("Value%v", i)}) samples[i].Labels = append(samples[i].Labels, cortexpb.LabelAdapter{Name: fmt.Sprintf("Sample2%v", i), Value: fmt.Sprintf("Value2%v", i)}) samples[i].Labels = append(samples[i].Labels, cortexpb.LabelAdapter{Name: fmt.Sprintf("Sample3%v", i), Value: fmt.Sprintf("Value3%v", i)}) @@ -1928,10 +1923,9 @@ func Benchmark_Decode_Protobuf(b *testing.B) { body, err := proto.Marshal(&r) require.NoError(b, err) - b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { response := &http.Response{ StatusCode: 200, Header: http.Header{"Content-Type": []string{"application/x-protobuf"}}, diff --git a/pkg/querier/tripperware/instantquery/limits_test.go b/pkg/querier/tripperware/instantquery/limits_test.go index a365eab414..155921269c 100644 --- a/pkg/querier/tripperware/instantquery/limits_test.go +++ b/pkg/querier/tripperware/instantquery/limits_test.go @@ -68,7 +68,6 @@ func TestLimitsMiddleware_MaxQueryLength(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() req := &tripperware.PrometheusRequest{Query: testData.query} diff --git a/pkg/querier/tripperware/merge.go b/pkg/querier/tripperware/merge.go index 0e3d8aabb4..3ebf099f67 100644 --- a/pkg/querier/tripperware/merge.go +++ b/pkg/querier/tripperware/merge.go @@ -3,6 +3,7 @@ package tripperware import ( "context" "fmt" + "slices" "sort" "github.com/prometheus/common/model" @@ -247,7 +248,7 @@ func statsMerge(shouldSumStats bool, resps []*PrometheusResponse) *PrometheusRes keys = append(keys, key) } - sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] }) + slices.Sort(keys) result := &PrometheusResponseStats{Samples: &PrometheusResponseSamplesStats{}} for _, key := range keys { diff --git a/pkg/querier/tripperware/merge_test.go b/pkg/querier/tripperware/merge_test.go index 7ee5b0cbbd..705f75d2c3 100644 --- a/pkg/querier/tripperware/merge_test.go +++ b/pkg/querier/tripperware/merge_test.go @@ -360,7 +360,6 @@ func TestMergeSampleStreams(t *testing.T) { }, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() output := make(map[string]SampleStream) @@ -440,7 +439,6 @@ func TestSliceSamples(t *testing.T) { }, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() actual := sliceSamples(tc.samples, tc.minTs) @@ -589,7 +587,6 @@ func 
TestSliceHistograms(t *testing.T) { }, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() actual := sliceHistograms(tc.histograms, tc.minTs) diff --git a/pkg/querier/tripperware/query.go b/pkg/querier/tripperware/query.go index 42e2d9eebf..8742c71698 100644 --- a/pkg/querier/tripperware/query.go +++ b/pkg/querier/tripperware/query.go @@ -4,7 +4,6 @@ import ( "bytes" "compress/gzip" "context" - "encoding/binary" "fmt" "io" "net/http" @@ -16,6 +15,8 @@ import ( "github.com/go-kit/log" "github.com/gogo/protobuf/proto" jsoniter "github.com/json-iterator/go" + "github.com/klauspost/compress/snappy" + "github.com/klauspost/compress/zstd" "github.com/opentracing/opentracing-go" otlog "github.com/opentracing/opentracing-go/log" "github.com/pkg/errors" @@ -27,7 +28,6 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/util/limiter" "github.com/cortexproject/cortex/pkg/util/runutil" "github.com/thanos-io/promql-engine/logicalplan" @@ -46,6 +46,8 @@ type Compression string const ( GzipCompression Compression = "gzip" + ZstdCompression Compression = "zstd" + SnappyCompression Compression = "snappy" NonCompression Compression = "" JsonCodecType CodecType = "json" ProtobufCodecType CodecType = "protobuf" @@ -55,9 +57,6 @@ const ( QueryResponseCortexMIMEType = "application/" + QueryResponseCortexMIMESubType QueryResponseCortexMIMESubType = "x-cortex-query+proto" RulerUserAgent = "CortexRuler" - - SourceRuler = "ruler" - SourceAPI = "api" ) // Codec is used to encode/decode query range requests and responses so they can be passed down to middlewares. @@ -446,7 +445,7 @@ type Buffer interface { Bytes() []byte } -func BodyBytes(res *http.Response, responseSizeLimiter *limiter.ResponseSizeLimiter, logger log.Logger) ([]byte, error) { +func BodyBytes(res *http.Response, logger log.Logger) ([]byte, error) { var buf *bytes.Buffer // Attempt to cast the response body to a Buffer and use it if possible. 
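The decode helper introduced in the next hunk centralizes body decompression for both BodyBytes and BodyBytesFromHTTPGRPCResponse. As a rough standalone sketch of that Content-Encoding dispatch, assuming the klauspost/compress packages imported above (logging and the runutil close helpers are trimmed):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"strings"

	"github.com/klauspost/compress/snappy"
	"github.com/klauspost/compress/zstd"
)

// decodeBody mirrors the dispatch in decode(): pick a reader based on the
// Content-Encoding value, or return the raw bytes when no encoding matches.
func decodeBody(buf *bytes.Buffer, encoding string) ([]byte, error) {
	switch {
	case strings.EqualFold(encoding, "gzip"):
		gr, err := gzip.NewReader(buf)
		if err != nil {
			return nil, err
		}
		defer gr.Close()
		return io.ReadAll(gr)
	case strings.EqualFold(encoding, "snappy"):
		// Snappy reader construction cannot fail, so there is no error to check.
		return io.ReadAll(snappy.NewReader(buf))
	case strings.EqualFold(encoding, "zstd"):
		zr, err := zstd.NewReader(buf)
		if err != nil {
			return nil, err
		}
		defer zr.Close()
		return io.ReadAll(zr.IOReadCloser())
	default:
		return buf.Bytes(), nil
	}
}

func main() {
	var compressed bytes.Buffer
	gw := gzip.NewWriter(&compressed)
	gw.Write([]byte(`{"status":"success"}`))
	gw.Close()

	body, err := decodeBody(&compressed, "gzip")
	fmt.Println(string(body), err)
}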
@@ -464,13 +463,26 @@ func BodyBytes(res *http.Response, responseSizeLimiter *limiter.ResponseSizeLimi } } - responseSize := getResponseSize(res, buf) - if err := responseSizeLimiter.AddResponseBytes(responseSize); err != nil { - return nil, httpgrpc.Errorf(http.StatusUnprocessableEntity, "%s", err.Error()) + // Handle decoding response if it was compressed + encoding := res.Header.Get("Content-Encoding") + return decode(buf, encoding, logger) +} + +func BodyBytesFromHTTPGRPCResponse(res *httpgrpc.HTTPResponse, logger log.Logger) ([]byte, error) { + headers := http.Header{} + for _, h := range res.Headers { + headers[h.Key] = h.Values } + // Handle decoding response if it was compressed + encoding := headers.Get("Content-Encoding") + buf := bytes.NewBuffer(res.Body) + return decode(buf, encoding, logger) +} + +func decode(buf *bytes.Buffer, encoding string, logger log.Logger) ([]byte, error) { // if the response is gzipped, lets unzip it here - if strings.EqualFold(res.Header.Get("Content-Encoding"), "gzip") { + if strings.EqualFold(encoding, "gzip") { gReader, err := gzip.NewReader(buf) if err != nil { return nil, err @@ -480,35 +492,24 @@ func BodyBytes(res *http.Response, responseSizeLimiter *limiter.ResponseSizeLimi return io.ReadAll(gReader) } - return buf.Bytes(), nil -} - -func BodyBytesFromHTTPGRPCResponse(res *httpgrpc.HTTPResponse, logger log.Logger) ([]byte, error) { - // if the response is gzipped, lets unzip it here - headers := http.Header{} - for _, h := range res.Headers { - headers[h.Key] = h.Values + // if the response is snappy compressed, decode it here + if strings.EqualFold(encoding, "snappy") { + sReader := snappy.NewReader(buf) + return io.ReadAll(sReader) } - if strings.EqualFold(headers.Get("Content-Encoding"), "gzip") { - gReader, err := gzip.NewReader(bytes.NewBuffer(res.Body)) + + // if the response is zstd compressed, decode it here + if strings.EqualFold(encoding, "zstd") { + zReader, err := zstd.NewReader(buf) if err != nil { return nil, err } - defer runutil.CloseWithLogOnErr(logger, gReader, "close gzip reader") + defer runutil.CloseWithLogOnErr(logger, zReader.IOReadCloser(), "close zstd decoder") - return io.ReadAll(gReader) + return io.ReadAll(zReader) } - return res.Body, nil -} - -func getResponseSize(res *http.Response, buf *bytes.Buffer) int { - if strings.EqualFold(res.Header.Get("Content-Encoding"), "gzip") && len(buf.Bytes()) >= 4 { - // GZIP body contains the size of the original (uncompressed) input data - // modulo 2^32 in the last 4 bytes (https://www.ietf.org/rfc/rfc1952.txt). - return int(binary.LittleEndian.Uint32(buf.Bytes()[len(buf.Bytes())-4:])) - } - return len(buf.Bytes()) + return buf.Bytes(), nil } // UnmarshalJSON implements json.Unmarshaler. 
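With getResponseSize removed (it recovered the uncompressed size from the gzip ISIZE trailer, a gzip-specific trick), the instant- and range-query DecodeResponse paths now account response size in two phases: the X-Uncompressed-Length header value is charged up front when the upstream provides it, and the decoded body length is charged otherwise. A toy sketch of that accounting; toyLimiter is illustrative and stands in for the real limiter.ResponseSizeLimiter:

package main

import (
	"errors"
	"fmt"
	"strconv"
)

// toyLimiter is illustrative only; it stands in for the real
// limiter.ResponseSizeLimiter.
type toyLimiter struct{ used, max int }

func (l *toyLimiter) AddResponseBytes(n int) error {
	l.used += n
	if l.used > l.max {
		return errors.New("response size limit exceeded")
	}
	return nil
}

// accountSize mirrors the two-phase accounting in DecodeResponse: charge the
// advertised uncompressed size up front when the header is present, otherwise
// charge the decoded body length afterwards.
func accountSize(l *toyLimiter, sizeHeader string, decodedBody []byte) error {
	if sizeHeader != "" {
		n, err := strconv.Atoi(sizeHeader)
		if err != nil {
			return err
		}
		return l.AddResponseBytes(n)
	}
	return l.AddResponseBytes(len(decodedBody))
}

func main() {
	l := &toyLimiter{max: 1 << 20}
	fmt.Println(accountSize(l, "2048", nil))                // header path
	fmt.Println(accountSize(l, "", []byte("decoded body"))) // fallback path
}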
@@ -767,9 +768,17 @@ func (s *PrometheusResponseStats) MarshalJSON() ([]byte, error) { } func SetRequestHeaders(h http.Header, defaultCodecType CodecType, compression Compression) { - if compression == GzipCompression { + switch compression { + case GzipCompression: h.Set("Accept-Encoding", string(GzipCompression)) + + case SnappyCompression: + h.Set("Accept-Encoding", string(SnappyCompression)) + + case ZstdCompression: + h.Set("Accept-Encoding", string(ZstdCompression)) } + if defaultCodecType == ProtobufCodecType { h.Set("Accept", ApplicationProtobuf+", "+ApplicationJson) } else { @@ -777,6 +786,17 @@ func SetRequestHeaders(h http.Header, defaultCodecType CodecType, compression Co } } +func ParseResponseSizeHeader(header string) (int, bool, error) { + if header == "" { + return 0, false, nil + } + size, err := strconv.Atoi(header) + if err != nil { + return 0, false, err + } + return size, true, nil +} + func UnmarshalResponse(r *http.Response, buf []byte, resp *PrometheusResponse) error { if r.Header == nil { return json.Unmarshal(buf, resp) diff --git a/pkg/querier/tripperware/query_attribute_matcher.go b/pkg/querier/tripperware/query_attribute_matcher.go index 002568b7a4..36e3810395 100644 --- a/pkg/querier/tripperware/query_attribute_matcher.go +++ b/pkg/querier/tripperware/query_attribute_matcher.go @@ -2,6 +2,7 @@ package tripperware import ( "net/http" + "slices" "strings" "time" @@ -159,13 +160,7 @@ func matchAttributeForMetadataQuery(attribute validation.QueryAttribute, op stri if attribute.Regex != "" { matched = true if attribute.Regex != ".*" && attribute.CompiledRegex != nil { - atLeastOneMatched := false - for _, matcher := range r.Form["match[]"] { - if attribute.CompiledRegex.MatchString(matcher) { - atLeastOneMatched = true - break - } - } + atLeastOneMatched := slices.ContainsFunc(r.Form["match[]"], attribute.CompiledRegex.MatchString) if !atLeastOneMatched { return false } diff --git a/pkg/querier/tripperware/query_test.go b/pkg/querier/tripperware/query_test.go index 04606df99e..a5d210488d 100644 --- a/pkg/querier/tripperware/query_test.go +++ b/pkg/querier/tripperware/query_test.go @@ -1,10 +1,7 @@ package tripperware import ( - "bytes" - "compress/gzip" "math" - "net/http" "strconv" "testing" "time" @@ -118,7 +115,7 @@ func TestSampleStreamJSONSerialization(t *testing.T) { } func generateData(timeseries, datapoints int) (floatMatrix, histogramMatrix []*SampleStream) { - for i := 0; i < timeseries; i++ { + for i := range timeseries { lset := labels.FromMap(map[string]string{ model.MetricNameLabel: "timeseries_" + strconv.Itoa(i), "foo": "bar", @@ -196,50 +193,3 @@ func generateData(timeseries, datapoints int) (floatMatrix, histogramMatrix []*S } return } - -func Test_getResponseSize(t *testing.T) { - tests := []struct { - body []byte - useGzip bool - }{ - { - body: []byte(`foo`), - useGzip: false, - }, - { - body: []byte(`foo`), - useGzip: true, - }, - { - body: []byte(`{"status":"success","data":{"resultType":"vector","result":[]}}`), - useGzip: false, - }, - { - body: []byte(`{"status":"success","data":{"resultType":"vector","result":[]}}`), - useGzip: true, - }, - } - - for i, test := range tests { - t.Run(strconv.Itoa(i), func(t *testing.T) { - expectedBodyLength := len(test.body) - buf := &bytes.Buffer{} - response := &http.Response{} - - if test.useGzip { - response = &http.Response{ - Header: http.Header{"Content-Encoding": []string{"gzip"}}, - } - w := gzip.NewWriter(buf) - _, err := w.Write(test.body) - require.NoError(t, err) - w.Close() - } else { - buf = 
bytes.NewBuffer(test.body) - } - - bodyLength := getResponseSize(response, buf) - require.Equal(t, expectedBodyLength, bodyLength) - }) - } -} diff --git a/pkg/querier/tripperware/queryrange/limits_test.go b/pkg/querier/tripperware/queryrange/limits_test.go index 3690e1e038..31d3008e5c 100644 --- a/pkg/querier/tripperware/queryrange/limits_test.go +++ b/pkg/querier/tripperware/queryrange/limits_test.go @@ -71,7 +71,6 @@ func TestLimitsMiddleware_MaxQueryLookback(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() req := &tripperware.PrometheusRequest{ @@ -190,7 +189,6 @@ func TestLimitsMiddleware_MaxQueryLength(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() req := &tripperware.PrometheusRequest{ diff --git a/pkg/querier/tripperware/queryrange/marshaling_test.go b/pkg/querier/tripperware/queryrange/marshaling_test.go index d0efd0e8d4..4661d1b168 100644 --- a/pkg/querier/tripperware/queryrange/marshaling_test.go +++ b/pkg/querier/tripperware/queryrange/marshaling_test.go @@ -27,10 +27,9 @@ func BenchmarkPrometheusCodec_DecodeResponse_Json(b *testing.B) { require.NoError(b, err) b.Log("test prometheus response size:", len(encodedRes)) - b.ResetTimer() b.ReportAllocs() - for n := 0; n < b.N; n++ { + for b.Loop() { _, err := PrometheusCodec.DecodeResponse(context.Background(), &http.Response{ StatusCode: 200, Header: http.Header{"Content-Type": []string{tripperware.ApplicationJson}}, @@ -53,10 +52,9 @@ func BenchmarkPrometheusCodec_DecodeResponse_Protobuf(b *testing.B) { require.NoError(b, err) b.Log("test prometheus response size:", len(encodedRes)) - b.ResetTimer() b.ReportAllocs() - for n := 0; n < b.N; n++ { + for b.Loop() { _, err := PrometheusCodec.DecodeResponse(context.Background(), &http.Response{ StatusCode: 200, Header: http.Header{"Content-Type": []string{tripperware.ApplicationProtobuf}}, @@ -76,10 +74,9 @@ func BenchmarkPrometheusCodec_EncodeResponse(b *testing.B) { // Generate a mocked response and marshal it. res := mockPrometheusResponse(numSeries, numSamplesPerSeries) - b.ResetTimer() b.ReportAllocs() - for n := 0; n < b.N; n++ { + for b.Loop() { _, err := PrometheusCodec.EncodeResponse(context.Background(), nil, res) require.NoError(b, err) } @@ -87,10 +84,10 @@ func BenchmarkPrometheusCodec_EncodeResponse(b *testing.B) { func mockPrometheusResponse(numSeries, numSamplesPerSeries int) *tripperware.PrometheusResponse { stream := make([]tripperware.SampleStream, numSeries) - for s := 0; s < numSeries; s++ { + for s := range numSeries { // Generate random samples. 
samples := make([]cortexpb.Sample, numSamplesPerSeries) - for i := 0; i < numSamplesPerSeries; i++ { + for i := range numSamplesPerSeries { samples[i] = cortexpb.Sample{ Value: rand.Float64(), TimestampMs: int64(i), diff --git a/pkg/querier/tripperware/queryrange/query_range.go b/pkg/querier/tripperware/queryrange/query_range.go index df721146f6..786676846b 100644 --- a/pkg/querier/tripperware/queryrange/query_range.go +++ b/pkg/querier/tripperware/queryrange/query_range.go @@ -63,8 +63,15 @@ type prometheusCodec struct { func NewPrometheusCodec(sharded bool, compressionStr string, defaultCodecTypeStr string) *prometheusCodec { //nolint:revive compression := tripperware.NonCompression // default - if compressionStr == string(tripperware.GzipCompression) { + switch compressionStr { + case string(tripperware.GzipCompression): compression = tripperware.GzipCompression + + case string(tripperware.SnappyCompression): + compression = tripperware.SnappyCompression + + case string(tripperware.ZstdCompression): + compression = tripperware.ZstdCompression } defaultCodecType := tripperware.JsonCodecType // default @@ -189,8 +196,7 @@ func (c prometheusCodec) EncodeRequest(ctx context.Context, r tripperware.Reques h.Add(n, v) } } - - h.Add("Content-Type", "application/json") + h.Add("Content-Type", "application/x-www-form-urlencoded") tripperware.SetRequestHeaders(h, c.defaultCodecType, c.compression) @@ -199,11 +205,15 @@ func (c prometheusCodec) EncodeRequest(ctx context.Context, r tripperware.Reques return nil, err } + form := url.Values{} + form.Set("plan", string(bodyBytes)) + formEncoded := form.Encode() + req := &http.Request{ Method: "POST", RequestURI: u.String(), // This is what the httpgrpc code looks at. URL: u, - Body: io.NopCloser(bytes.NewReader(bodyBytes)), + Body: io.NopCloser(strings.NewReader(formEncoded)), Header: h, } @@ -218,13 +228,31 @@ func (c prometheusCodec) DecodeResponse(ctx context.Context, r *http.Response, _ return nil, err } + responseSizeHeader := r.Header.Get("X-Uncompressed-Length") responseSizeLimiter := limiter.ResponseSizeLimiterFromContextWithFallback(ctx) - body, err := tripperware.BodyBytes(r, responseSizeLimiter, log) + responseSize, hasSizeHeader, err := tripperware.ParseResponseSizeHeader(responseSizeHeader) + if err != nil { + log.Error(err) + return nil, err + } + if hasSizeHeader { + if err := responseSizeLimiter.AddResponseBytes(responseSize); err != nil { + return nil, httpgrpc.Errorf(http.StatusUnprocessableEntity, "%s", err.Error()) + } + } + + body, err := tripperware.BodyBytes(r, log) if err != nil { log.Error(err) return nil, err } + if !hasSizeHeader { + if err := responseSizeLimiter.AddResponseBytes(len(body)); err != nil { + return nil, httpgrpc.Errorf(http.StatusUnprocessableEntity, "%s", err.Error()) + } + } + if r.StatusCode/100 != 2 { return nil, httpgrpc.Errorf(r.StatusCode, "%s", string(body)) } diff --git a/pkg/querier/tripperware/queryrange/query_range_middlewares.go b/pkg/querier/tripperware/queryrange/query_range_middlewares.go index 38493f5413..5d17c70daf 100644 --- a/pkg/querier/tripperware/queryrange/query_range_middlewares.go +++ b/pkg/querier/tripperware/queryrange/query_range_middlewares.go @@ -22,9 +22,11 @@ import ( "github.com/go-kit/log" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "github.com/thanos-io/promql-engine/logicalplan" "github.com/thanos-io/thanos/pkg/querysharding" "github.com/cortexproject/cortex/pkg/chunk/cache" + "github.com/cortexproject/cortex/pkg/distributed_execution" 
"github.com/cortexproject/cortex/pkg/querier" "github.com/cortexproject/cortex/pkg/querier/tripperware" "github.com/cortexproject/cortex/pkg/util/flagext" @@ -104,6 +106,7 @@ func Middlewares( lookbackDelta time.Duration, defaultEvaluationInterval time.Duration, distributedExecEnabled bool, + localOptimizers []logicalplan.Optimizer, ) ([]tripperware.Middleware, cache.Cache, error) { // Metric used to keep track of each middleware execution duration. metrics := tripperware.NewInstrumentMiddlewareMetrics(registerer) @@ -142,7 +145,8 @@ func Middlewares( if distributedExecEnabled { queryRangeMiddleware = append(queryRangeMiddleware, tripperware.InstrumentMiddleware("range_logical_plan_gen", metrics), - tripperware.DistributedQueryMiddleware(defaultEvaluationInterval, lookbackDelta)) + tripperware.DistributedQueryMiddleware(defaultEvaluationInterval, lookbackDelta, + append(localOptimizers, &distributed_execution.DistributedOptimizer{}))) } return queryRangeMiddleware, c, nil diff --git a/pkg/querier/tripperware/queryrange/query_range_middlewares_test.go b/pkg/querier/tripperware/queryrange/query_range_middlewares_test.go index f21eae986d..49fa09a41a 100644 --- a/pkg/querier/tripperware/queryrange/query_range_middlewares_test.go +++ b/pkg/querier/tripperware/queryrange/query_range_middlewares_test.go @@ -69,6 +69,7 @@ func TestRoundTrip(t *testing.T) { 5*time.Minute, time.Minute, false, + logicalplan.DefaultOptimizers, ) require.NoError(t, err) @@ -193,6 +194,7 @@ func TestRoundTripWithAndWithoutDistributedExec(t *testing.T) { 5*time.Minute, time.Minute, tc.distributedEnabled, + logicalplan.DefaultOptimizers, ) require.NoError(t, err) @@ -231,8 +233,7 @@ func TestRoundTripWithAndWithoutDistributedExec(t *testing.T) { require.NoError(t, err) // check request body - body, err := io.ReadAll(req.Body) - require.NoError(t, err) + body := []byte(req.PostFormValue("plan")) if tc.expectEmptyBody { require.Empty(t, body) } else { diff --git a/pkg/querier/tripperware/queryrange/query_range_test.go b/pkg/querier/tripperware/queryrange/query_range_test.go index 27fba6b1ba..20d69d0fda 100644 --- a/pkg/querier/tripperware/queryrange/query_range_test.go +++ b/pkg/querier/tripperware/queryrange/query_range_test.go @@ -71,7 +71,6 @@ func TestRequest(t *testing.T) { expectedErr: queryapi.ErrStepTooSmall, }, } { - tc := tc t.Run(tc.url, func(t *testing.T) { t.Parallel() r, err := http.NewRequest("POST", tc.url, http.NoBody) @@ -265,7 +264,6 @@ func TestResponse(t *testing.T) { }, } for i, tc := range testCases { - tc := tc t.Run(strconv.Itoa(i), func(t *testing.T) { t.Parallel() protobuf, err := proto.Marshal(tc.promBody) @@ -398,7 +396,6 @@ func TestResponseWithStats(t *testing.T) { isProtobuf: false, }, } { - tc := tc t.Run(strconv.Itoa(i), func(t *testing.T) { t.Parallel() protobuf, err := proto.Marshal(tc.promBody) @@ -1182,7 +1179,6 @@ func TestMergeAPIResponses(t *testing.T) { }, }, }} { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() ctx, cancelCtx := context.WithCancel(user.InjectOrgID(context.Background(), "1")) diff --git a/pkg/querier/tripperware/queryrange/results_cache.go b/pkg/querier/tripperware/queryrange/results_cache.go index db6d2f284f..96f24516be 100644 --- a/pkg/querier/tripperware/queryrange/results_cache.go +++ b/pkg/querier/tripperware/queryrange/results_cache.go @@ -293,11 +293,9 @@ func (s resultsCache) Do(ctx context.Context, r tripperware.Request) (tripperwar // shouldCacheResponse says whether the response should be cached or not. 
func (s resultsCache) shouldCacheResponse(ctx context.Context, req tripperware.Request, r tripperware.Response, maxCacheTime int64) bool { headerValues := getHeaderValuesWithName(r, cacheControlHeader) - for _, v := range headerValues { - if v == noStoreValue { - level.Debug(util_log.WithContext(ctx, s.logger)).Log("msg", fmt.Sprintf("%s header in response is equal to %s, not caching the response", cacheControlHeader, noStoreValue)) - return false - } + if slices.Contains(headerValues, noStoreValue) { + level.Debug(util_log.WithContext(ctx, s.logger)).Log("msg", fmt.Sprintf("%s header in response is equal to %s, not caching the response", cacheControlHeader, noStoreValue)) + return false } if !s.isAtModifierCachable(ctx, req, maxCacheTime) { @@ -335,7 +333,12 @@ func (s resultsCache) isAtModifierCachable(ctx context.Context, r tripperware.Re } // This resolves the start() and end() used with the @ modifier. - expr = promql.PreprocessExpr(expr, timestamp.Time(r.GetStart()), timestamp.Time(r.GetEnd())) + expr, err = promql.PreprocessExpr(expr, timestamp.Time(r.GetStart()), timestamp.Time(r.GetEnd()), time.Duration(r.GetStep())*time.Millisecond) + if err != nil { + // We are being pessimistic in such cases. + level.Warn(util_log.WithContext(ctx, s.logger)).Log("msg", "failed to preprocess expr", "query", query, "err", err) + return false + } end := r.GetEnd() atModCachable := true diff --git a/pkg/querier/tripperware/queryrange/results_cache_test.go b/pkg/querier/tripperware/queryrange/results_cache_test.go index 8d4c32cfd2..05e968fb6e 100644 --- a/pkg/querier/tripperware/queryrange/results_cache_test.go +++ b/pkg/querier/tripperware/queryrange/results_cache_test.go @@ -298,7 +298,6 @@ func TestStatsCacheQuerySamples(t *testing.T) { expectedResponse: mkAPIResponseWithStats(0, 100, 10, false, false), }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() cfg := ResultsCacheConfig{ @@ -990,7 +989,6 @@ func TestPartition(t *testing.T) { expectedScannedSamplesFromCachedResponse: getScannedSamples(100, 105, 10), }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() s := resultsCache{ @@ -1243,7 +1241,6 @@ func TestHandleHit(t *testing.T) { }, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() sut := resultsCache{ @@ -1373,7 +1370,6 @@ func TestResultsCacheMaxFreshness(t *testing.T) { expectedResponse: parsedResponse, }, } { - tc := tc t.Run(strconv.Itoa(i), func(t *testing.T) { t.Parallel() var cfg ResultsCacheConfig @@ -1477,7 +1473,6 @@ func TestSplitter_generateCacheKey(t *testing.T) { {"3d5h", &tripperware.PrometheusRequest{Start: toMs(77 * time.Hour), Step: 10, Query: "foo{}"}, 24 * time.Hour, "fake:foo{}:10:3"}, } for _, tt := range tests { - tt := tt t.Run(fmt.Sprintf("%s - %s", tt.name, tt.interval), func(t *testing.T) { t.Parallel() ctx := user.InjectOrgID(context.Background(), "1") @@ -1526,7 +1521,6 @@ func TestResultsCacheShouldCacheFunc(t *testing.T) { } for _, tc := range testcases { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() calls := 0 diff --git a/pkg/querier/tripperware/queryrange/split_by_interval_test.go b/pkg/querier/tripperware/queryrange/split_by_interval_test.go index 31b0d82541..0e122ef35f 100644 --- a/pkg/querier/tripperware/queryrange/split_by_interval_test.go +++ b/pkg/querier/tripperware/queryrange/split_by_interval_test.go @@ -61,7 +61,6 @@ func TestNextIntervalBoundary(t *testing.T) { {toMs(day) + 15*seconds, 35 * seconds, 2*toMs(day) - 5*seconds, day}, {toMs(time.Hour) + 15*seconds, 35 * seconds, 
2*toMs(time.Hour) - 15*seconds, time.Hour}, } { - tc := tc t.Run(strconv.Itoa(i), func(t *testing.T) { t.Parallel() require.Equal(t, tc.out, nextIntervalBoundary(tc.in, tc.step, tc.interval)) @@ -266,7 +265,6 @@ func TestSplitQuery(t *testing.T) { interval: 3 * time.Hour, }, } { - tc := tc t.Run(strconv.Itoa(i), func(t *testing.T) { t.Parallel() days, err := splitQuery(tc.input, tc.interval) @@ -321,7 +319,6 @@ func TestSplitByDay(t *testing.T) { intervalFn: dynamicIntervalFn(Config{SplitQueriesByInterval: day, DynamicQuerySplitsConfig: DynamicQuerySplitsConfig{MaxShardsPerQuery: 10}}, mockLimits{}, querysharding.NewQueryAnalyzer(), lookbackDelta), }, } { - tc := tc t.Run(strconv.Itoa(i), func(t *testing.T) { t.Parallel() var actualCount atomic.Int32 @@ -423,7 +420,6 @@ func Test_evaluateAtModifier(t *testing.T) { expectedErrorCode: http.StatusBadRequest, }, } { - tt := tt t.Run(tt.in, func(t *testing.T) { out, err := evaluateAtModifierFunction(tt.in, start, end) if tt.expectedErrorCode != 0 { diff --git a/pkg/querier/tripperware/queryrange/step_align_test.go b/pkg/querier/tripperware/queryrange/step_align_test.go index ac197b5b46..5a6b69f8a8 100644 --- a/pkg/querier/tripperware/queryrange/step_align_test.go +++ b/pkg/querier/tripperware/queryrange/step_align_test.go @@ -40,7 +40,6 @@ func TestStepAlign(t *testing.T) { }, }, } { - tc := tc t.Run(strconv.Itoa(i), func(t *testing.T) { t.Parallel() var result *tripperware.PrometheusRequest diff --git a/pkg/querier/tripperware/queryrange/test_utils.go b/pkg/querier/tripperware/queryrange/test_utils.go index 6e198baebb..7d37139d04 100644 --- a/pkg/querier/tripperware/queryrange/test_utils.go +++ b/pkg/querier/tripperware/queryrange/test_utils.go @@ -18,19 +18,18 @@ func genLabels( l := labelSet[0] rest := genLabels(labelSet[1:], labelBuckets) - for i := 0; i < labelBuckets; i++ { + for i := range labelBuckets { x := labels.Label{ Name: l, Value: fmt.Sprintf("%d", i), } if len(rest) == 0 { - set := labels.Labels{x} - result = append(result, set) + result = append(result, labels.FromStrings(x.Name, x.Value)) continue } for _, others := range rest { - set := append(others, x) - result = append(result, set) + builder := labels.NewBuilder(others).Set(x.Name, x.Value) + result = append(result, builder.Labels()) } } return result diff --git a/pkg/querier/tripperware/queryrange/test_utils_test.go b/pkg/querier/tripperware/queryrange/test_utils_test.go index 7e0d8268ea..8bdf75b3dd 100644 --- a/pkg/querier/tripperware/queryrange/test_utils_test.go +++ b/pkg/querier/tripperware/queryrange/test_utils_test.go @@ -2,7 +2,6 @@ package queryrange import ( "math" - "sort" "testing" "github.com/prometheus/prometheus/model/labels" @@ -12,51 +11,13 @@ import ( func TestGenLabelsCorrectness(t *testing.T) { t.Parallel() ls := genLabels([]string{"a", "b"}, 2) - for _, set := range ls { - sort.Sort(set) - } expected := []labels.Labels{ - { - labels.Label{ - Name: "a", - Value: "0", - }, - labels.Label{ - Name: "b", - Value: "0", - }, - }, - { - labels.Label{ - Name: "a", - Value: "0", - }, - labels.Label{ - Name: "b", - Value: "1", - }, - }, - { - labels.Label{ - Name: "a", - Value: "1", - }, - labels.Label{ - Name: "b", - Value: "0", - }, - }, - { - labels.Label{ - Name: "a", - Value: "1", - }, - labels.Label{ - Name: "b", - Value: "1", - }, - }, + labels.FromStrings("a", "0", "b", "0"), + labels.FromStrings("a", "0", "b", "1"), + labels.FromStrings("a", "1", "b", "0"), + labels.FromStrings("a", "1", "b", "1"), } + require.Equal(t, expected, ls) } diff --git 
a/pkg/querier/tripperware/queryrange/value.go b/pkg/querier/tripperware/queryrange/value.go index efa8569a9d..e13bb54fc6 100644 --- a/pkg/querier/tripperware/queryrange/value.go +++ b/pkg/querier/tripperware/queryrange/value.go @@ -58,10 +58,10 @@ func FromResult(res *promql.Result) ([]tripperware.SampleStream, error) { } func mapLabels(ls labels.Labels) []cortexpb.LabelAdapter { - result := make([]cortexpb.LabelAdapter, 0, len(ls)) - for _, l := range ls { + result := make([]cortexpb.LabelAdapter, 0, ls.Len()) + ls.Range(func(l labels.Label) { result = append(result, cortexpb.LabelAdapter(l)) - } + }) return result } diff --git a/pkg/querier/tripperware/queryrange/value_test.go b/pkg/querier/tripperware/queryrange/value_test.go index e82eadfa73..38e3053222 100644 --- a/pkg/querier/tripperware/queryrange/value_test.go +++ b/pkg/querier/tripperware/queryrange/value_test.go @@ -48,20 +48,14 @@ func TestFromValue(t *testing.T) { input: &promql.Result{ Value: promql.Vector{ promql.Sample{ - T: 1, - F: 1, - Metric: labels.Labels{ - {Name: "a", Value: "a1"}, - {Name: "b", Value: "b1"}, - }, + T: 1, + F: 1, + Metric: labels.FromStrings("a", "a1", "b", "b1"), }, promql.Sample{ - T: 2, - F: 2, - Metric: labels.Labels{ - {Name: "a", Value: "a2"}, - {Name: "b", Value: "b2"}, - }, + T: 2, + F: 2, + Metric: labels.FromStrings("a", "a2", "b", "b2"), }, }, }, @@ -98,20 +92,14 @@ func TestFromValue(t *testing.T) { input: &promql.Result{ Value: promql.Matrix{ { - Metric: labels.Labels{ - {Name: "a", Value: "a1"}, - {Name: "b", Value: "b1"}, - }, + Metric: labels.FromStrings("a", "a1", "b", "b1"), Floats: []promql.FPoint{ {T: 1, F: 1}, {T: 2, F: 2}, }, }, { - Metric: labels.Labels{ - {Name: "a", Value: "a2"}, - {Name: "b", Value: "b2"}, - }, + Metric: labels.FromStrings("a", "a2", "b", "b2"), Floats: []promql.FPoint{ {T: 1, F: 8}, {T: 2, F: 9}, @@ -158,7 +146,6 @@ func TestFromValue(t *testing.T) { } for i, c := range testExpr { - c := c t.Run(fmt.Sprintf("[%d]", i), func(t *testing.T) { t.Parallel() result, err := FromResult(c.input) diff --git a/pkg/querier/tripperware/roundtrip.go b/pkg/querier/tripperware/roundtrip.go index 144bb04da3..2ff717bae8 100644 --- a/pkg/querier/tripperware/roundtrip.go +++ b/pkg/querier/tripperware/roundtrip.go @@ -34,7 +34,7 @@ import ( "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/limiter" - util_log "github.com/cortexproject/cortex/pkg/util/log" + "github.com/cortexproject/cortex/pkg/util/requestmeta" ) const ( @@ -46,6 +46,8 @@ const ( opTypeLabelValues = "label_values" opTypeMetadata = "metadata" opTypeQueryExemplars = "query_exemplars" + opTypeFormatQuery = "format_query" + opTypeParseQuery = "parse_query" ) // HandlerFunc is like http.HandlerFunc, but for Handler. 
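A note on the labels.Labels rewrites above: FromStrings, Len, Range, and NewBuilder are the representation-agnostic accessors Prometheus exposes so the same code can run against either the classic slice-backed labels or the packed string form, which is why callers no longer index, append to, or sort a Labels value directly. A minimal, self-contained sketch of the pattern (the label names are illustrative; only the upstream labels package is assumed):

package example

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func demo() {
	// Build a label set from name/value pairs instead of a struct literal.
	ls := labels.FromStrings("job", "prometheus", "instance", "a")

	// Iterate with Range instead of ranging over a slice.
	ls.Range(func(l labels.Label) {
		fmt.Printf("%s=%s\n", l.Name, l.Value)
	})

	// Derive a modified copy with a Builder instead of append().
	extended := labels.NewBuilder(ls).Set("env", "prod").Labels()
	fmt.Println(extended.Len()) // 3
}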
@@ -152,6 +154,8 @@ func NewQueryTripperware( isLabelValues := strings.HasSuffix(r.URL.Path, "/values") isMetadata := strings.HasSuffix(r.URL.Path, "/metadata") isQueryExemplars := strings.HasSuffix(r.URL.Path, "/query_exemplars") + isFormatQuery := strings.HasSuffix(r.URL.Path, "/format_query") + isParseQuery := strings.HasSuffix(r.URL.Path, "/parse_query") op := opTypeQuery switch { @@ -169,6 +173,10 @@ func NewQueryTripperware( op = opTypeMetadata case isQueryExemplars: op = opTypeQueryExemplars + case isFormatQuery: + op = opTypeFormatQuery + case isParseQuery: + op = opTypeParseQuery } tenantIDs, err := tenant.TenantIDs(r.Context()) @@ -179,7 +187,7 @@ func NewQueryTripperware( now := time.Now() userStr := tenant.JoinTenantIDs(tenantIDs) activeUsers.UpdateUserTimestamp(userStr, now) - source := GetSource(r.Header.Get("User-Agent")) + source := GetSource(r) queriesPerTenant.WithLabelValues(op, source, userStr).Inc() if maxSubQuerySteps > 0 && (isQuery || isQueryRange) { @@ -255,8 +263,8 @@ func (q roundTripper) Do(ctx context.Context, r Request) (Response, error) { return nil, err } - if headerMap := util_log.HeaderMapFromContext(ctx); headerMap != nil { - util_log.InjectHeadersIntoHTTPRequest(headerMap, request) + if requestMetadataMap := requestmeta.MapFromContext(ctx); requestMetadataMap != nil { + requestmeta.InjectMetadataIntoHTTPRequestHeaders(requestMetadataMap, request) } if err := user.InjectOrgIDIntoHTTPRequest(ctx, request); err != nil { @@ -275,11 +283,13 @@ func (q roundTripper) Do(ctx context.Context, r Request) (Response, error) { return q.codec.DecodeResponse(ctx, response, r) } -func GetSource(userAgent string) string { - if strings.Contains(userAgent, RulerUserAgent) { +func GetSource(r *http.Request) string { + // Check the User-Agent header too, for backwards compatibility with callers that predate request metadata propagation. + userAgent := r.Header.Get("User-Agent") + if strings.Contains(userAgent, RulerUserAgent) || requestmeta.RequestFromRuler(r.Context()) { // caller is ruler - return SourceRuler + return requestmeta.SourceRuler } - return SourceAPI + return requestmeta.SourceAPI } diff --git a/pkg/querier/tripperware/roundtrip_test.go b/pkg/querier/tripperware/roundtrip_test.go index ceb4510d47..0f4d953fee 100644 --- a/pkg/querier/tripperware/roundtrip_test.go +++ b/pkg/querier/tripperware/roundtrip_test.go @@ -38,6 +38,8 @@ const ( labelNamesQuery = "/api/v1/labels" labelValuesQuery = "/api/v1/label/label/values" metadataQuery = "/api/v1/metadata" + formatQuery = "/api/v1/format_query?query=foo/bar" + parseQuery = "/api/v1/parse_query?query=foo/bar" responseBody = `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"foo":"bar"},"values":[[1536673680,"137"],[1536673780,"137"]]}]}}` instantResponseBody = `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"foo":"bar"},"values":[[1536673680,"137"],[1536673780,"137"]]}]}}` @@ -229,6 +231,30 @@ cortex_query_frontend_queries_total{op="remote_read", source="api", user="1"} 1 # HELP cortex_query_frontend_queries_total Total queries sent per tenant. # TYPE cortex_query_frontend_queries_total counter cortex_query_frontend_queries_total{op="query_range", source="api", user="1"} 1 +`, + }, + { + path: formatQuery, + expectedBody: "bar", + limits: defaultOverrides, + maxSubQuerySteps: 11000, + userAgent: "dummyUserAgent/1.2", + expectedMetric: ` +# HELP cortex_query_frontend_queries_total Total queries sent per tenant.
+# TYPE cortex_query_frontend_queries_total counter +cortex_query_frontend_queries_total{op="format_query", source="api", user="1"} 1 +`, + }, + { + path: parseQuery, + expectedBody: "bar", + limits: defaultOverrides, + maxSubQuerySteps: 11000, + userAgent: "dummyUserAgent/1.2", + expectedMetric: ` +# HELP cortex_query_frontend_queries_total Total queries sent per tenant. +# TYPE cortex_query_frontend_queries_total counter +cortex_query_frontend_queries_total{op="parse_query", source="api", user="1"} 1 `, }, { diff --git a/pkg/querier/tripperware/shard_by.go b/pkg/querier/tripperware/shard_by.go index 5cd23459e1..9053e522e2 100644 --- a/pkg/querier/tripperware/shard_by.go +++ b/pkg/querier/tripperware/shard_by.go @@ -92,7 +92,7 @@ func (s shardBy) Do(ctx context.Context, r Request) (Response, error) { func (s shardBy) shardQuery(l log.Logger, verticalShardSize int, r Request, analysis querysharding.QueryAnalysis) []Request { reqs := make([]Request, verticalShardSize) - for i := 0; i < verticalShardSize; i++ { + for i := range verticalShardSize { q, err := cquerysharding.InjectShardingInfo(r.GetQuery(), &storepb.ShardInfo{ TotalShards: int64(verticalShardSize), ShardIndex: int64(i), diff --git a/pkg/querier/tripperware/util.go b/pkg/querier/tripperware/util.go index c1e2144b96..90f2224c11 100644 --- a/pkg/querier/tripperware/util.go +++ b/pkg/querier/tripperware/util.go @@ -38,11 +38,8 @@ func DoRequests(ctx context.Context, downstream Handler, reqs []Request, limits }() respChan, errChan := make(chan RequestResponse), make(chan error) - parallelism := validation.SmallestPositiveIntPerTenant(tenantIDs, limits.MaxQueryParallelism) - if parallelism > len(reqs) { - parallelism = len(reqs) - } - for i := 0; i < parallelism; i++ { + parallelism := min(validation.SmallestPositiveIntPerTenant(tenantIDs, limits.MaxQueryParallelism), len(reqs)) + for range parallelism { go func() { for req := range intermediate { resp, err := downstream.Do(ctx, req) diff --git a/pkg/querier/worker/frontend_processor.go b/pkg/querier/worker/frontend_processor.go index 17bd031acf..88f7f31139 100644 --- a/pkg/querier/worker/frontend_processor.go +++ b/pkg/querier/worker/frontend_processor.go @@ -17,6 +17,7 @@ import ( querier_stats "github.com/cortexproject/cortex/pkg/querier/stats" "github.com/cortexproject/cortex/pkg/util/backoff" util_log "github.com/cortexproject/cortex/pkg/util/log" + "github.com/cortexproject/cortex/pkg/util/requestmeta" ) var ( @@ -129,18 +130,12 @@ func (fp *frontendProcessor) runRequest(ctx context.Context, request *httpgrpc.H for _, h := range request.Headers { headers[h.Key] = h.Values[0] } - headerMap := make(map[string]string, 0) - // Remove non-existent header. 
- for _, header := range fp.targetHeaders { - if v, ok := headers[textproto.CanonicalMIMEHeaderKey(header)]; ok { - headerMap[header] = v - } - } + ctx = requestmeta.ContextWithRequestMetadataMapFromHeaders(ctx, headers, fp.targetHeaders) + orgID, ok := headers[textproto.CanonicalMIMEHeaderKey(user.OrgIDHeaderName)] if ok { ctx = user.InjectOrgID(ctx, orgID) } - ctx = util_log.ContextWithHeaderMap(ctx, headerMap) logger := util_log.WithContext(ctx, fp.log) if statsEnabled { level.Info(logger).Log("msg", "started running request") diff --git a/pkg/querier/worker/frontend_processor_test.go b/pkg/querier/worker/frontend_processor_test.go index 3b10cc29a6..2881a10b0f 100644 --- a/pkg/querier/worker/frontend_processor_test.go +++ b/pkg/querier/worker/frontend_processor_test.go @@ -34,7 +34,7 @@ func TestRecvFailDoesntCancelProcess(t *testing.T) { mgr.processQueriesOnSingleStream(ctx, cc, "test:12345") }() - test.Poll(t, time.Second, true, func() interface{} { + test.Poll(t, time.Second, true, func() any { return running.Load() }) @@ -44,7 +44,7 @@ func TestRecvFailDoesntCancelProcess(t *testing.T) { assert.Equal(t, true, running.Load()) cancel() - test.Poll(t, time.Second, false, func() interface{} { + test.Poll(t, time.Second, false, func() any { return running.Load() }) } @@ -61,18 +61,18 @@ func TestContextCancelStopsProcess(t *testing.T) { pm := newProcessorManager(ctx, &mockProcessor{}, cc, "test") pm.concurrency(1) - test.Poll(t, time.Second, 1, func() interface{} { + test.Poll(t, time.Second, 1, func() any { return int(pm.currentProcessors.Load()) }) cancel() - test.Poll(t, time.Second, 0, func() interface{} { + test.Poll(t, time.Second, 0, func() any { return int(pm.currentProcessors.Load()) }) pm.stop() - test.Poll(t, time.Second, 0, func() interface{} { + test.Poll(t, time.Second, 0, func() any { return int(pm.currentProcessors.Load()) }) } diff --git a/pkg/querier/worker/scheduler_processor.go b/pkg/querier/worker/scheduler_processor.go index 0d14921028..3bba598044 100644 --- a/pkg/querier/worker/scheduler_processor.go +++ b/pkg/querier/worker/scheduler_processor.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "net/http" - "net/textproto" "time" "github.com/go-kit/log" @@ -28,10 +27,11 @@ import ( "github.com/cortexproject/cortex/pkg/util/httpgrpcutil" util_log "github.com/cortexproject/cortex/pkg/util/log" cortexmiddleware "github.com/cortexproject/cortex/pkg/util/middleware" + "github.com/cortexproject/cortex/pkg/util/requestmeta" "github.com/cortexproject/cortex/pkg/util/services" ) -func newSchedulerProcessor(cfg Config, handler RequestHandler, log log.Logger, reg prometheus.Registerer) (*schedulerProcessor, []services.Service) { +func newSchedulerProcessor(cfg Config, handler RequestHandler, log log.Logger, reg prometheus.Registerer, querierAddress string) (*schedulerProcessor, []services.Service) { p := &schedulerProcessor{ log: log, handler: handler, @@ -47,6 +47,7 @@ func newSchedulerProcessor(cfg Config, handler RequestHandler, log log.Logger, r Help: "Time spend doing requests to frontend.", Buckets: prometheus.ExponentialBuckets(0.001, 4, 6), }, []string{"operation", "status_code"}), + querierAddress: querierAddress, } frontendClientsGauge := promauto.With(reg).NewGauge(prometheus.GaugeOpts{ @@ -71,6 +72,7 @@ type schedulerProcessor struct { grpcConfig grpcclient.Config maxMessageSize int querierID string + querierAddress string frontendPool *client.Pool frontendClientRequestDuration *prometheus.HistogramVec @@ -97,7 +99,7 @@ func (sp *schedulerProcessor) 
processQueriesOnSingleStream(ctx context.Context, for backoff.Ongoing() { c, err := schedulerClient.QuerierLoop(ctx) if err == nil { - err = c.Send(&schedulerpb.QuerierToScheduler{QuerierID: sp.querierID}) + err = c.Send(&schedulerpb.QuerierToScheduler{QuerierID: sp.querierID, QuerierAddress: sp.querierAddress}) } if err != nil { @@ -141,14 +143,7 @@ func (sp *schedulerProcessor) querierLoop(c schedulerpb.SchedulerForQuerier_Quer for _, h := range request.HttpRequest.Headers { headers[h.Key] = h.Values[0] } - headerMap := make(map[string]string, 0) - // Remove non-existent header. - for _, header := range sp.targetHeaders { - if v, ok := headers[textproto.CanonicalMIMEHeaderKey(header)]; ok { - headerMap[header] = v - } - } - ctx = util_log.ContextWithHeaderMap(ctx, headerMap) + ctx = requestmeta.ContextWithRequestMetadataMapFromHeaders(ctx, headers, sp.targetHeaders) tracer := opentracing.GlobalTracer() // Ignore errors here. If we cannot get parent span, we just don't create new one. diff --git a/pkg/querier/worker/scheduler_processor_test.go b/pkg/querier/worker/scheduler_processor_test.go index c3d2534e44..74fc4c58b8 100644 --- a/pkg/querier/worker/scheduler_processor_test.go +++ b/pkg/querier/worker/scheduler_processor_test.go @@ -92,12 +92,12 @@ func (m *mockQuerierLoopClient) Context() context.Context { return args.Get(0).(context.Context) } -func (m *mockQuerierLoopClient) SendMsg(msg interface{}) error { +func (m *mockQuerierLoopClient) SendMsg(msg any) error { args := m.Called(msg) return args.Error(0) } -func (m *mockQuerierLoopClient) RecvMsg(msg interface{}) error { +func (m *mockQuerierLoopClient) RecvMsg(msg any) error { args := m.Called(msg) return args.Error(0) } @@ -144,7 +144,7 @@ func Test_ToShowNotPanic_RelatedIssue6599(t *testing.T) { go stat.AddFetchedChunkBytes(10) }).Return(&httpgrpc.HTTPResponse{}, nil) - sp, _ := newSchedulerProcessor(cfg, requestHandler, log.NewNopLogger(), nil) + sp, _ := newSchedulerProcessor(cfg, requestHandler, log.NewNopLogger(), nil, "") schedulerClient := &mockSchedulerForQuerierClient{} schedulerClient.On("QuerierLoop", mock.Anything, mock.Anything).Return(querierLoopClient, nil) diff --git a/pkg/querier/worker/worker.go b/pkg/querier/worker/worker.go index 90e32b7aff..aa09f079b8 100644 --- a/pkg/querier/worker/worker.go +++ b/pkg/querier/worker/worker.go @@ -3,7 +3,9 @@ package worker import ( "context" "flag" + "net" "os" + "strconv" "sync" "time" @@ -14,7 +16,9 @@ import ( "github.com/weaveworks/common/httpgrpc" "google.golang.org/grpc" + "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/grpcclient" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -33,6 +37,10 @@ type Config struct { GRPCClientConfig grpcclient.Config `yaml:"grpc_client_config"` TargetHeaders []string `yaml:"-"` // Propagated by config. + + InstanceInterfaceNames []string `yaml:"instance_interface_names"` + ListenPort int `yaml:"-"` + InstanceAddr string `yaml:"instance_addr" doc:"hidden"` } func (cfg *Config) RegisterFlags(f *flag.FlagSet) { @@ -46,6 +54,10 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.StringVar(&cfg.QuerierID, "querier.id", "", "Querier ID, sent to frontend service to identify requests from the same querier. 
Defaults to hostname.") cfg.GRPCClientConfig.RegisterFlagsWithPrefix("querier.frontend-client", "", f) + + cfg.InstanceInterfaceNames = []string{"eth0", "en0"} + f.Var((*flagext.StringSlice)(&cfg.InstanceInterfaceNames), "querier.instance-interface-names", "Name of network interface to read address from.") + f.StringVar(&cfg.InstanceAddr, "querier.instance-addr", "", "IP address of the querier") } func (cfg *Config) Validate(log log.Logger) error { @@ -109,7 +121,14 @@ func NewQuerierWorker(cfg Config, handler RequestHandler, log log.Logger, reg pr level.Info(log).Log("msg", "Starting querier worker connected to query-scheduler", "scheduler", cfg.SchedulerAddress) address = cfg.SchedulerAddress - processor, servs = newSchedulerProcessor(cfg, handler, log, reg) + + ipAddr, err := ring.GetInstanceAddr(cfg.InstanceAddr, cfg.InstanceInterfaceNames, log) + if err != nil { + return nil, err + } + querierAddr := net.JoinHostPort(ipAddr, strconv.Itoa(cfg.ListenPort)) + + processor, servs = newSchedulerProcessor(cfg, handler, log, reg, querierAddr) case cfg.FrontendAddress != "": level.Info(log).Log("msg", "Starting querier worker connected to query-frontend", "frontend", cfg.FrontendAddress) diff --git a/pkg/querier/worker/worker_test.go b/pkg/querier/worker/worker_test.go index 13a21949c5..d207688306 100644 --- a/pkg/querier/worker/worker_test.go +++ b/pkg/querier/worker/worker_test.go @@ -69,7 +69,6 @@ func TestResetConcurrency(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() cfg := Config{ @@ -88,13 +87,13 @@ func TestResetConcurrency(t *testing.T) { w.AddressAdded(fmt.Sprintf("127.0.0.1:%d", i)) } - test.Poll(t, 250*time.Millisecond, tt.expectedConcurrency, func() interface{} { + test.Poll(t, 250*time.Millisecond, tt.expectedConcurrency, func() any { return getConcurrentProcessors(w) }) // now we remove an address and ensure we still have the expected concurrency w.AddressRemoved(fmt.Sprintf("127.0.0.1:%d", rand.Intn(tt.numTargets))) - test.Poll(t, 250*time.Millisecond, tt.expectedConcurrencyAfterTargetRemoval, func() interface{} { + test.Poll(t, 250*time.Millisecond, tt.expectedConcurrencyAfterTargetRemoval, func() any { return getConcurrentProcessors(w) }) diff --git a/pkg/querysharding/util.go b/pkg/querysharding/util.go index 2b438ce275..05a8552cc3 100644 --- a/pkg/querysharding/util.go +++ b/pkg/querysharding/util.go @@ -4,8 +4,10 @@ import ( "encoding/base64" "sync" + "github.com/pkg/errors" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql/parser" + "github.com/thanos-io/thanos/pkg/querysharding" "github.com/thanos-io/thanos/pkg/store/storepb" cortexparser "github.com/cortexproject/cortex/pkg/parser" @@ -16,10 +18,12 @@ const ( ) var ( - buffers = sync.Pool{New: func() interface{} { + Buffers = sync.Pool{New: func() any { b := make([]byte, 0, 100) return &b }} + + stop = errors.New("stop") ) func InjectShardingInfo(query string, shardInfo *storepb.ShardInfo) (string, error) { @@ -75,5 +79,45 @@ func ExtractShardingMatchers(matchers []*labels.Matcher) ([]*labels.Matcher, *st return r, nil, err } - return r, shardInfo.Matcher(&buffers), nil + return r, shardInfo.Matcher(&Buffers), nil +} + +type disableBinaryExpressionAnalyzer struct { + analyzer querysharding.Analyzer +} + +// NewDisableBinaryExpressionAnalyzer is a wrapper around the analyzer that disables binary expressions. 
+func NewDisableBinaryExpressionAnalyzer(analyzer querysharding.Analyzer) *disableBinaryExpressionAnalyzer { + return &disableBinaryExpressionAnalyzer{analyzer: analyzer} +} + +func (d *disableBinaryExpressionAnalyzer) Analyze(query string) (querysharding.QueryAnalysis, error) { + analysis, err := d.analyzer.Analyze(query) + if err != nil || !analysis.IsShardable() { + return analysis, err + } + + expr, _ := cortexparser.ParseExpr(query) + isShardable := true + parser.Inspect(expr, func(node parser.Node, nodes []parser.Node) error { + switch n := node.(type) { + case *parser.BinaryExpr: + // No vector matching means one operand is not vector. Skip it. + if n.VectorMatching == nil { + return nil + } + // Vector matching with ignoring will add MetricNameLabel as a sharding label. + // Mark this type of query as not shardable. + if !n.VectorMatching.On { + isShardable = false + return stop + } + } + return nil + }) + if !isShardable { + // Mark as not shardable. + return querysharding.QueryAnalysis{}, nil + } + return analysis, nil } diff --git a/pkg/querysharding/util_test.go b/pkg/querysharding/util_test.go new file mode 100644 index 0000000000..cba2319072 --- /dev/null +++ b/pkg/querysharding/util_test.go @@ -0,0 +1,145 @@ +package querysharding + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/thanos-io/thanos/pkg/querysharding" +) + +func TestDisableBinaryExpressionAnalyzer_Analyze(t *testing.T) { + tests := []struct { + name string + query string + expectShardable bool + expectError bool + description string + }{ + { + name: "binary expression with vector matching on", + query: `up{job="prometheus"} + on(instance) rate(cpu_usage[5m])`, + expectShardable: true, + expectError: false, + description: "Binary expression with 'on' matching should remain shardable", + }, + { + name: "binary expression without explicit vector matching", + query: `up{job="prometheus"} + rate(cpu_usage[5m])`, + expectShardable: false, + expectError: false, + description: "No explicit vector matching defaults to ignoring-style matching (On=false).
Not shardable.", + }, + { + name: "binary expression with vector matching ignoring", + query: `up{job="prometheus"} + ignoring(instance) rate(cpu_usage[5m])`, + expectShardable: false, + expectError: false, + description: "Binary expression with 'ignoring' matching should not be shardable", + }, + { + name: "complex expression with binary expr using on", + query: `sum(rate(http_requests_total[5m])) by (job) + on(job) avg(cpu_usage) by (job)`, + expectShardable: true, + expectError: false, + description: "Complex expression with 'on' matching should remain shardable", + }, + { + name: "complex expression with binary expr using ignoring", + query: `sum(rate(http_requests_total[5m])) by (job) + ignoring(instance) avg(cpu_usage) by (job)`, + expectShardable: false, + expectError: false, + description: "Complex expression with 'ignoring' matching should not be shardable", + }, + { + name: "nested binary expressions with one ignoring", + query: `(up + on(job) rate(cpu[5m])) * ignoring(instance) memory_usage`, + expectShardable: false, + expectError: false, + description: "Nested expressions with any 'ignoring' should not be shardable", + }, + { + name: "aggregation", + query: `sum(rate(http_requests_total[5m])) by (job)`, + expectShardable: true, + expectError: false, + description: "Aggregations should remain shardable", + }, + { + name: "aggregation with binary expression and scalar", + query: `sum(rate(http_requests_total[5m])) by (job) * 100`, + expectShardable: true, + expectError: false, + description: "Aggregations should remain shardable", + }, + { + name: "invalid query", + query: "invalid{query", + expectShardable: false, + expectError: true, + description: "Invalid queries should return error", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create the actual thanos analyzer + thanosAnalyzer := querysharding.NewQueryAnalyzer() + + // Wrap it with our disable binary expression analyzer + analyzer := NewDisableBinaryExpressionAnalyzer(thanosAnalyzer) + + // Test the wrapped analyzer + result, err := analyzer.Analyze(tt.query) + + if tt.expectError { + require.Error(t, err, tt.description) + return + } + + require.NoError(t, err, tt.description) + assert.Equal(t, tt.expectShardable, result.IsShardable(), tt.description) + }) + } +} + +func TestDisableBinaryExpressionAnalyzer_ComparedToOriginal(t *testing.T) { + // Test cases that verify the wrapper correctly modifies behavior + testCases := []struct { + name string + query string + }{ + { + name: "ignoring expression should be disabled", + query: `up + ignoring(instance) rate(cpu[5m])`, + }, + { + name: "nested ignoring expression should be disabled", + query: `(sum(rate(http_requests_total[5m])) by (job)) + ignoring(instance) avg(cpu_usage) by (job)`, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Test with original analyzer + originalAnalyzer := querysharding.NewQueryAnalyzer() + originalResult, err := originalAnalyzer.Analyze(tc.query) + require.NoError(t, err) + + // Test with wrapped analyzer + wrappedAnalyzer := NewDisableBinaryExpressionAnalyzer(originalAnalyzer) + wrappedResult, err := wrappedAnalyzer.Analyze(tc.query) + require.NoError(t, err) + + // The wrapped analyzer should make previously shardable queries non-shardable + // if they contain binary expressions with ignoring + if originalResult.IsShardable() { + assert.False(t, wrappedResult.IsShardable(), + "Wrapped analyzer should disable sharding for queries with ignoring vector matching") + } else 
{ + // If original wasn't shardable, wrapped shouldn't be either + assert.False(t, wrappedResult.IsShardable()) + } + }) + } +} diff --git a/pkg/ring/basic_lifecycler.go b/pkg/ring/basic_lifecycler.go index 70491b1a1b..fb751e4fa1 100644 --- a/pkg/ring/basic_lifecycler.go +++ b/pkg/ring/basic_lifecycler.go @@ -271,7 +271,7 @@ heartbeatLoop: func (l *BasicLifecycler) registerInstance(ctx context.Context) error { var instanceDesc InstanceDesc - err := l.store.CAS(ctx, l.ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err := l.store.CAS(ctx, l.ringKey, func(in any) (out any, retry bool, err error) { ringDesc := GetOrCreateRingDesc(in) var exists bool @@ -392,7 +392,7 @@ func (l *BasicLifecycler) verifyTokens(ctx context.Context) bool { func (l *BasicLifecycler) unregisterInstance(ctx context.Context) error { level.Info(l.logger).Log("msg", "unregistering instance from ring", "ring", l.ringName) - err := l.store.CAS(ctx, l.ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err := l.store.CAS(ctx, l.ringKey, func(in any) (out any, retry bool, err error) { if in == nil { return nil, false, fmt.Errorf("found empty ring when trying to unregister") } @@ -418,7 +418,7 @@ func (l *BasicLifecycler) unregisterInstance(ctx context.Context) error { func (l *BasicLifecycler) updateInstance(ctx context.Context, update func(*Desc, *InstanceDesc) bool) error { var instanceDesc InstanceDesc - err := l.store.CAS(ctx, l.ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err := l.store.CAS(ctx, l.ringKey, func(in any) (out any, retry bool, err error) { ringDesc := GetOrCreateRingDesc(in) var ok bool diff --git a/pkg/ring/basic_lifecycler_delegates_test.go b/pkg/ring/basic_lifecycler_delegates_test.go index 1a81233ac9..cb7e4672f4 100644 --- a/pkg/ring/basic_lifecycler_delegates_test.go +++ b/pkg/ring/basic_lifecycler_delegates_test.go @@ -172,7 +172,7 @@ func TestTokensPersistencyDelegate_ShouldHandleTheCaseTheInstanceIsAlreadyInTheR defer services.StopAndAwaitTerminated(ctx, lifecycler) //nolint:errcheck // Add the instance to the ring. - require.NoError(t, store.CAS(ctx, testRingKey, func(in interface{}) (out interface{}, retry bool, err error) { + require.NoError(t, store.CAS(ctx, testRingKey, func(in any) (out any, retry bool, err error) { ringDesc := NewDesc() ringDesc.AddIngester(cfg.ID, cfg.Addr, cfg.Zone, testData.initialTokens, testData.initialState, registeredAt) return ringDesc, true, nil @@ -278,7 +278,7 @@ func TestAutoForgetDelegate(t *testing.T) { require.NoError(t, err) // Setup the initial state of the ring. - require.NoError(t, store.CAS(ctx, testRingKey, func(in interface{}) (out interface{}, retry bool, err error) { + require.NoError(t, store.CAS(ctx, testRingKey, func(in any) (out any, retry bool, err error) { ringDesc := NewDesc() testData.setup(ringDesc) return ringDesc, true, nil @@ -289,7 +289,7 @@ func TestAutoForgetDelegate(t *testing.T) { defer services.StopAndAwaitTerminated(ctx, lifecycler) //nolint:errcheck // Wait until an heartbeat has been sent. 
- test.Poll(t, time.Second, true, func() interface{} { + test.Poll(t, time.Second, true, func() any { return testutil.ToFloat64(lifecycler.metrics.heartbeats) > 0 }) diff --git a/pkg/ring/basic_lifecycler_test.go b/pkg/ring/basic_lifecycler_test.go index b21c3cd4fd..6e9d704f71 100644 --- a/pkg/ring/basic_lifecycler_test.go +++ b/pkg/ring/basic_lifecycler_test.go @@ -89,7 +89,7 @@ func TestBasicLifecycler_RegisterOnStart(t *testing.T) { // Add an initial instance to the ring. if testData.initialInstanceDesc != nil { - require.NoError(t, store.CAS(ctx, testRingKey, func(in interface{}) (out interface{}, retry bool, err error) { + require.NoError(t, store.CAS(ctx, testRingKey, func(in any) (out any, retry bool, err error) { desc := testData.initialInstanceDesc ringDesc := GetOrCreateRingDesc(in) @@ -244,7 +244,7 @@ func TestBasicLifecycler_HeartbeatWhileRunning(t *testing.T) { desc, _ := getInstanceFromStore(t, store, testInstanceID) initialTimestamp := desc.GetTimestamp() - test.Poll(t, time.Second*5, true, func() interface{} { + test.Poll(t, time.Second*5, true, func() any { desc, _ := getInstanceFromStore(t, store, testInstanceID) currTimestamp := desc.GetTimestamp() @@ -269,7 +269,7 @@ func TestBasicLifecycler_HeartbeatWhileStopping(t *testing.T) { // Since the hearbeat timestamp is in seconds we would have to wait 1s before we can assert // on it being changed, regardless the heartbeat period. To speed up this test, we're going // to reset the timestamp to 0 and then assert it has been updated. - require.NoError(t, store.CAS(ctx, testRingKey, func(in interface{}) (out interface{}, retry bool, err error) { + require.NoError(t, store.CAS(ctx, testRingKey, func(in any) (out any, retry bool, err error) { ringDesc := GetOrCreateRingDesc(in) instanceDesc := ringDesc.Ingesters[testInstanceID] instanceDesc.Timestamp = 0 @@ -278,7 +278,7 @@ func TestBasicLifecycler_HeartbeatWhileStopping(t *testing.T) { })) // Wait until the timestamp has been updated. - test.Poll(t, time.Second, true, func() interface{} { + test.Poll(t, time.Second, true, func() any { desc, _ := getInstanceFromStore(t, store, testInstanceID) currTimestamp := desc.GetTimestamp() @@ -313,11 +313,11 @@ func TestBasicLifecycler_HeartbeatAfterBackendRest(t *testing.T) { // Now we delete it from the ring to simulate a ring storage reset and we expect the next heartbeat // will restore it. - require.NoError(t, store.CAS(ctx, testRingKey, func(in interface{}) (out interface{}, retry bool, err error) { + require.NoError(t, store.CAS(ctx, testRingKey, func(in any) (out any, retry bool, err error) { return NewDesc(), true, nil })) - test.Poll(t, time.Second, true, func() interface{} { + test.Poll(t, time.Second, true, func() any { desc, ok := getInstanceFromStore(t, store, testInstanceID) return ok && desc.GetTimestamp() > 0 && @@ -371,7 +371,7 @@ func TestBasicLifecycler_TokensObservePeriod(t *testing.T) { // While the lifecycler is starting we poll the ring. As soon as the instance // is registered, we remove some tokens to simulate how gossip memberlist // reconciliation works in case of clashing tokens. - test.Poll(t, time.Second, true, func() interface{} { + test.Poll(t, time.Second, true, func() any { // Ensure the instance has been registered in the ring. desc, ok := getInstanceFromStore(t, store, testInstanceID) if !ok { @@ -379,7 +379,7 @@ func TestBasicLifecycler_TokensObservePeriod(t *testing.T) { } // Remove some tokens. 
- return store.CAS(ctx, testRingKey, func(in interface{}) (out interface{}, retry bool, err error) { + return store.CAS(ctx, testRingKey, func(in any) (out any, retry bool, err error) { ringDesc := GetOrCreateRingDesc(in) ringDesc.AddIngester(testInstanceID, desc.Addr, desc.Zone, Tokens{4, 5}, desc.State, time.Now()) return ringDesc, true, nil @@ -413,7 +413,7 @@ func TestBasicLifecycler_updateInstance_ShouldAddInstanceToTheRingIfDoesNotExist expectedRegisteredAt := lifecycler.GetRegisteredAt() // Now we delete it from the ring to simulate a ring storage reset. - require.NoError(t, store.CAS(ctx, testRingKey, func(in interface{}) (out interface{}, retry bool, err error) { + require.NoError(t, store.CAS(ctx, testRingKey, func(in any) (out any, retry bool, err error) { return NewDesc(), true, nil })) diff --git a/pkg/ring/bench/ring_memberlist_test.go b/pkg/ring/bench/ring_memberlist_test.go index 1366c47aa4..51fe51c196 100644 --- a/pkg/ring/bench/ring_memberlist_test.go +++ b/pkg/ring/bench/ring_memberlist_test.go @@ -81,7 +81,7 @@ func BenchmarkMemberlistReceiveWithRingDesc(b *testing.B) { const numTokens = 128 initialDesc := ring.NewDesc() { - for i := 0; i < numInstances; i++ { + for i := range numInstances { tokens := generateUniqueTokens(i, numTokens) initialDesc.AddIngester(fmt.Sprintf("instance-%d", i), "127.0.0.1", "zone", tokens, ring.ACTIVE, time.Now()) } @@ -101,9 +101,7 @@ func BenchmarkMemberlistReceiveWithRingDesc(b *testing.B) { testMsgs[i] = encodeMessage(b, "ring", testDesc) } - b.ResetTimer() - - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { mkv.NotifyMsg(testMsgs[i]) } } diff --git a/pkg/ring/client/pool.go b/pkg/ring/client/pool.go index 981a7399dd..e7b822592e 100644 --- a/pkg/ring/client/pool.go +++ b/pkg/ring/client/pool.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "slices" "sync" "time" @@ -13,7 +14,6 @@ import ( "github.com/weaveworks/common/user" "google.golang.org/grpc/health/grpc_health_v1" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -165,7 +165,7 @@ func (p *Pool) removeStaleClients() { } for _, addr := range p.RegisteredAddresses() { - if util.StringsContain(serviceAddrs, addr) { + if slices.Contains(serviceAddrs, addr) { continue } level.Info(p.logger).Log("msg", "removing stale client", "addr", addr) diff --git a/pkg/ring/client/pool_test.go b/pkg/ring/client/pool_test.go index 5ab735966b..948d49d3a0 100644 --- a/pkg/ring/client/pool_test.go +++ b/pkg/ring/client/pool_test.go @@ -21,13 +21,6 @@ type mockClient struct { status grpc_health_v1.HealthCheckResponse_ServingStatus } -func (i mockClient) List(ctx context.Context, in *grpc_health_v1.HealthListRequest, opts ...grpc.CallOption) (*grpc_health_v1.HealthListResponse, error) { - if !i.happy { - return nil, fmt.Errorf("Fail") - } - return &grpc_health_v1.HealthListResponse{}, nil -} - func (i mockClient) Check(ctx context.Context, in *grpc_health_v1.HealthCheckRequest, opts ...grpc.CallOption) (*grpc_health_v1.HealthCheckResponse, error) { if !i.happy { return nil, fmt.Errorf("Fail") diff --git a/pkg/ring/http.go b/pkg/ring/http.go index cbef6f3ce6..023b716fef 100644 --- a/pkg/ring/http.go +++ b/pkg/ring/http.go @@ -94,7 +94,7 @@ func init() { } func (r *Ring) forget(ctx context.Context, id string) error { - unregister := func(in interface{}) (out interface{}, retry bool, err error) { + unregister := func(in any) (out any, retry bool, err error) { if in == nil { return nil, false, fmt.Errorf("found empty ring when trying to 
unregister") } diff --git a/pkg/ring/kv/client.go b/pkg/ring/kv/client.go index eae1ee2518..163e48a6f2 100644 --- a/pkg/ring/kv/client.go +++ b/pkg/ring/kv/client.go @@ -39,11 +39,11 @@ var inmemoryStoreInit sync.Once var inmemoryStore Client // StoreConfig is a configuration used for building single store client, either -// Consul, Etcd, Memberlist or MultiClient. It was extracted from Config to keep +// Consul, DynamoDB, Etcd, Memberlist or MultiClient. It was extracted from Config to keep // single-client config separate from final client-config (with all the wrappers) type StoreConfig struct { - DynamoDB dynamodb.Config `yaml:"dynamodb"` Consul consul.Config `yaml:"consul"` + DynamoDB dynamodb.Config `yaml:"dynamodb"` Etcd etcd.Config `yaml:"etcd"` Multi MultiConfig `yaml:"multi"` @@ -81,7 +81,7 @@ func (cfg *Config) RegisterFlagsWithPrefix(flagsPrefix, defaultPrefix string, f flagsPrefix = "ring." } f.StringVar(&cfg.Prefix, flagsPrefix+"prefix", defaultPrefix, "The prefix for the keys in the store. Should end with a /.") - f.StringVar(&cfg.Store, flagsPrefix+"store", "consul", "Backend storage to use for the ring. Supported values are: consul, etcd, inmemory, memberlist, multi.") + f.StringVar(&cfg.Store, flagsPrefix+"store", "consul", "Backend storage to use for the ring. Supported values are: consul, dynamodb, etcd, inmemory, memberlist, multi.") } // Client is a high-level client for key-value stores (such as Etcd and @@ -95,7 +95,7 @@ type Client interface { // Get a specific key. Will use a codec to deserialise key to appropriate type. // If the key does not exist, Get will return nil and no error. - Get(ctx context.Context, key string) (interface{}, error) + Get(ctx context.Context, key string) (any, error) // Delete a specific key. Deletions are best-effort and no error will // be returned if the key does not exist. @@ -108,19 +108,19 @@ type Client interface { // with new value etc. Guarantees that only a single concurrent CAS // succeeds. Callback can return nil to indicate it is happy with existing // value. - CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error + CAS(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error // WatchKey calls f whenever the value stored under key changes. - WatchKey(ctx context.Context, key string, f func(interface{}) bool) + WatchKey(ctx context.Context, key string, f func(any) bool) // WatchPrefix calls f whenever any value stored under prefix changes. - WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) + WatchPrefix(ctx context.Context, prefix string, f func(string, any) bool) // LastUpdateTime returns the time a key was last sync by the kv store LastUpdateTime(key string) time.Time } -// NewClient creates a new Client (consul, etcd or inmemory) based on the config, +// NewClient creates a new Client based on the config, // encodes and decodes data for storage using the codec. 
func NewClient(cfg Config, codec codec.Codec, reg prometheus.Registerer, logger log.Logger) (Client, error) { if cfg.Mock != nil { diff --git a/pkg/ring/kv/client_test.go b/pkg/ring/kv/client_test.go index b31f904d08..26a0c20f6c 100644 --- a/pkg/ring/kv/client_test.go +++ b/pkg/ring/kv/client_test.go @@ -64,7 +64,7 @@ func Test_createClient_singleBackend_mustContainRoleAndTypeLabels(t *testing.T) reg := prometheus.NewPedanticRegistry() client, err := createClient("mock", "/test1", storeCfg, testCodec, Primary, reg, testLogger{}) require.NoError(t, err) - require.NoError(t, client.CAS(context.Background(), "/test", func(_ interface{}) (out interface{}, retry bool, err error) { + require.NoError(t, client.CAS(context.Background(), "/test", func(_ any) (out any, retry bool, err error) { out = &mockMessage{id: "inCAS"} retry = false return @@ -82,7 +82,7 @@ func Test_createClient_multiBackend_mustContainRoleAndTypeLabels(t *testing.T) { reg := prometheus.NewPedanticRegistry() client, err := createClient("multi", "/test1", storeCfg, testCodec, Primary, reg, testLogger{}) require.NoError(t, err) - require.NoError(t, client.CAS(context.Background(), "/test", func(_ interface{}) (out interface{}, retry bool, err error) { + require.NoError(t, client.CAS(context.Background(), "/test", func(_ any) (out any, retry bool, err error) { out = &mockMessage{id: "inCAS"} retry = false return @@ -154,6 +154,6 @@ func (m *mockMessage) ProtoMessage() { type testLogger struct { } -func (l testLogger) Log(keyvals ...interface{}) error { +func (l testLogger) Log(keyvals ...any) error { return nil } diff --git a/pkg/ring/kv/codec/clonable.go b/pkg/ring/kv/codec/clonable.go index c3df74c621..5b0eb38c84 100644 --- a/pkg/ring/kv/codec/clonable.go +++ b/pkg/ring/kv/codec/clonable.go @@ -2,5 +2,5 @@ package codec type Clonable interface { // Clone should return a deep copy of the state. - Clone() interface{} + Clone() any } diff --git a/pkg/ring/kv/codec/codec.go b/pkg/ring/kv/codec/codec.go index d701bbe208..9c88473e50 100644 --- a/pkg/ring/kv/codec/codec.go +++ b/pkg/ring/kv/codec/codec.go @@ -10,11 +10,11 @@ import ( // Codec allows KV clients to serialise and deserialise values. type Codec interface { - Decode([]byte) (interface{}, error) - Encode(interface{}) ([]byte, error) + Decode([]byte) (any, error) + Encode(any) ([]byte, error) - DecodeMultiKey(map[string][]byte) (interface{}, error) - EncodeMultiKey(interface{}) (map[string][]byte, error) + DecodeMultiKey(map[string][]byte) (any, error) + EncodeMultiKey(any) (map[string][]byte, error) // CodecID is a short identifier to communicate what codec should be used to decode the value. // Once in use, this should be stable to avoid confusing other clients. 
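The interface{} to any conversions in this kv package are mechanical, but the CAS contract they touch deserves a gloss: the callback receives the codec-decoded current value (nil when the key is absent) and returns the value to write plus a retry flag that tells the client to re-run the callback on fresh state if the compare-and-swap loses a race. A hedged sketch of a typical caller; the counter key, the helper name, and the use of the String codec are illustrative assumptions, not part of this change:

package example

import (
	"context"
	"strconv"

	"github.com/cortexproject/cortex/pkg/ring/kv"
)

// incrementCounter bumps a string-encoded counter stored under key,
// assuming the client was constructed with the String codec.
func incrementCounter(ctx context.Context, client kv.Client, key string) error {
	return client.CAS(ctx, key, func(in any) (out any, retry bool, err error) {
		cur := 0
		if in != nil {
			// The codec fixes the concrete type behind any; the
			// String codec decodes stored values to string.
			if cur, err = strconv.Atoi(in.(string)); err != nil {
				return nil, false, err // malformed value: fail, do not retry
			}
		}
		// retry=true asks the client to re-invoke this callback with
		// fresh state if a concurrent writer wins the swap.
		return strconv.Itoa(cur + 1), true, nil
	})
}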
@@ -36,12 +36,12 @@ func (p Proto) CodecID() string { } // Decode implements Codec -func (p Proto) Decode(bytes []byte) (interface{}, error) { +func (p Proto) Decode(bytes []byte) (any, error) { return p.decode(bytes, p.factory()) } // DecodeMultiKey implements Codec -func (p Proto) DecodeMultiKey(data map[string][]byte) (interface{}, error) { +func (p Proto) DecodeMultiKey(data map[string][]byte) (any, error) { msg := p.factory() // Don't even try out, ok := msg.(MultiKey) @@ -50,7 +50,7 @@ func (p Proto) DecodeMultiKey(data map[string][]byte) (interface{}, error) { } if len(data) > 0 { - res := make(map[string]interface{}, len(data)) + res := make(map[string]any, len(data)) for key, bytes := range data { decoded, err := p.decode(bytes, out.GetItemFactory()) if err != nil { @@ -64,7 +64,7 @@ func (p Proto) DecodeMultiKey(data map[string][]byte) (interface{}, error) { return out, nil } -func (p Proto) decode(bytes []byte, out proto.Message) (interface{}, error) { +func (p Proto) decode(bytes []byte, out proto.Message) (any, error) { bytes, err := snappy.Decode(nil, bytes) if err != nil { return nil, err @@ -76,7 +76,7 @@ func (p Proto) decode(bytes []byte, out proto.Message) (interface{}, error) { } // Encode implements Codec -func (p Proto) Encode(msg interface{}) ([]byte, error) { +func (p Proto) Encode(msg any) ([]byte, error) { bytes, err := proto.Marshal(msg.(proto.Message)) if err != nil { return nil, err @@ -85,7 +85,7 @@ func (p Proto) Encode(msg interface{}) ([]byte, error) { } // EncodeMultiKey implements Codec -func (p Proto) EncodeMultiKey(msg interface{}) (map[string][]byte, error) { +func (p Proto) EncodeMultiKey(msg any) (map[string][]byte, error) { // Don't even try r, ok := msg.(MultiKey) if !ok || r == nil { @@ -112,19 +112,19 @@ func (String) CodecID() string { } // Decode implements Codec. -func (String) Decode(bytes []byte) (interface{}, error) { +func (String) Decode(bytes []byte) (any, error) { return string(bytes), nil } // Encode implements Codec. 
-func (String) Encode(msg interface{}) ([]byte, error) { +func (String) Encode(msg any) ([]byte, error) { return []byte(msg.(string)), nil } -func (String) EncodeMultiKey(msg interface{}) (map[string][]byte, error) { +func (String) EncodeMultiKey(msg any) (map[string][]byte, error) { return nil, errors.New("String codec does not support EncodeMultiKey") } -func (String) DecodeMultiKey(map[string][]byte) (interface{}, error) { +func (String) DecodeMultiKey(map[string][]byte) (any, error) { return nil, errors.New("String codec does not support DecodeMultiKey") } diff --git a/pkg/ring/kv/codec/codec_test.go b/pkg/ring/kv/codec/codec_test.go index ff729626e7..99d1961cdd 100644 --- a/pkg/ring/kv/codec/codec_test.go +++ b/pkg/ring/kv/codec/codec_test.go @@ -28,7 +28,7 @@ func Test_EncodeMultikey(t *testing.T) { codec := NewProtoCodec("test", newProtoDescMock) descMock := &DescMock{} expectedSplitKeys := []string{"t1", "t2"} - expectedSplit := map[string]interface{}{ + expectedSplit := map[string]any{ expectedSplitKeys[0]: descMock, expectedSplitKeys[1]: descMock, } @@ -94,17 +94,17 @@ func newProtoDescMock() proto.Message { return &DescMock{} } -func (m *DescMock) Clone() interface{} { +func (m *DescMock) Clone() any { args := m.Called() return args.Get(0) } -func (m *DescMock) SplitByID() map[string]interface{} { +func (m *DescMock) SplitByID() map[string]any { args := m.Called() - return args.Get(0).(map[string]interface{}) + return args.Get(0).(map[string]any) } -func (m *DescMock) JoinIds(map[string]interface{}) { +func (m *DescMock) JoinIds(map[string]any) { m.Called() } @@ -113,7 +113,7 @@ func (m *DescMock) GetItemFactory() proto.Message { return args.Get(0).(proto.Message) } -func (m *DescMock) FindDifference(that MultiKey) (interface{}, []string, error) { +func (m *DescMock) FindDifference(that MultiKey) (any, []string, error) { args := m.Called(that) var err error if args.Get(2) != nil { diff --git a/pkg/ring/kv/codec/multikey.go b/pkg/ring/kv/codec/multikey.go index bd8802c4ad..b2e9f12abc 100644 --- a/pkg/ring/kv/codec/multikey.go +++ b/pkg/ring/kv/codec/multikey.go @@ -9,11 +9,11 @@ type MultiKey interface { // SplitByID Split interface in array of key and value. THe key is a unique identifier of an instance in the ring. The value is // interface with its data. The interface resultant need to be a proto.Message - SplitByID() map[string]interface{} + SplitByID() map[string]any // JoinIds update the current interface to add receiving key value information. The key is an unique identifier for an instance. // The value is the information for that instance. - JoinIds(in map[string]interface{}) + JoinIds(in map[string]any) // GetItemFactory method to be used for deserilaize the value information from an instance GetItemFactory() proto.Message @@ -21,5 +21,5 @@ type MultiKey interface { // FindDifference returns the difference between two Multikeys. The returns are an interface which also implements Multikey // with an array of keys which were changed, and an array of strings which are unique identifiers deleted. 
An error is // returned when that does not implement the correct codec - FindDifference(that MultiKey) (interface{}, []string, error) + FindDifference(that MultiKey) (any, []string, error) } diff --git a/pkg/ring/kv/consul/client.go b/pkg/ring/kv/consul/client.go index a9ecdc279e..7e86bd8aef 100644 --- a/pkg/ring/kv/consul/client.go +++ b/pkg/ring/kv/consul/client.go @@ -146,7 +146,7 @@ func NewClient(cfg Config, codec codec.Codec, logger log.Logger, registerer prom } // Put is mostly here for testing. -func (c *Client) Put(ctx context.Context, key string, value interface{}) error { +func (c *Client) Put(ctx context.Context, key string, value any) error { bytes, err := c.codec.Encode(value) if err != nil { return err @@ -163,13 +163,13 @@ func (c *Client) Put(ctx context.Context, key string, value interface{}) error { // CAS atomically modifies a value in a callback. // If value doesn't exist you'll get nil as an argument to your callback. -func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error { +func (c *Client) CAS(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error { return instrument.CollectedRequest(ctx, "CAS loop", c.consulMetrics.consulRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { return c.cas(ctx, key, f) }) } -func (c *Client) cas(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error { +func (c *Client) cas(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error { retries := c.cfg.MaxCasRetries if retries == 0 { retries = 10 @@ -193,7 +193,7 @@ func (c *Client) cas(ctx context.Context, key string, f func(in interface{}) (ou level.Error(c.logger).Log("msg", "error getting key", "key", key, "err", err) continue } - var intermediate interface{} + var intermediate any if kvp != nil { out, err := c.codec.Decode(kvp.Value) if err != nil { @@ -247,7 +247,7 @@ func (c *Client) cas(ctx context.Context, key string, f func(in interface{}) (ou // value. To construct the deserialised value, a factory function should be // supplied which generates an empty struct for WatchKey to deserialise // into. This function blocks until the context is cancelled or f returns false. -func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) bool) { +func (c *Client) WatchKey(ctx context.Context, key string, f func(any) bool) { var ( backoff = backoff.New(ctx, backoffConfig) index = uint64(0) @@ -308,7 +308,7 @@ func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) b // WatchPrefix will watch a given prefix in Consul for new keys and changes to existing keys under that prefix. // When the value under said key changes, the f callback is called with the deserialised value. // Values in Consul are assumed to be JSON. This function blocks until the context is cancelled. -func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) { +func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, any) bool) { var ( backoff = backoff.New(ctx, backoffConfig) index = uint64(0) @@ -387,7 +387,7 @@ func (c *Client) List(ctx context.Context, prefix string) ([]string, error) { } // Get implements kv.Get. 
-func (c *Client) Get(ctx context.Context, key string) (interface{}, error) { +func (c *Client) Get(ctx context.Context, key string) (any, error) { options := &consul.QueryOptions{ AllowStale: !c.cfg.ConsistentReads, RequireConsistent: c.cfg.ConsistentReads, @@ -434,9 +434,6 @@ func (c *Client) createRateLimiter() *rate.Limiter { // burst is ignored when limit = rate.Inf return rate.NewLimiter(rate.Inf, 0) } - burst := c.cfg.WatchKeyBurstSize - if burst < 1 { - burst = 1 - } + burst := max(c.cfg.WatchKeyBurstSize, 1) return rate.NewLimiter(rate.Limit(c.cfg.WatchKeyRateLimit), burst) } diff --git a/pkg/ring/kv/consul/client_test.go b/pkg/ring/kv/consul/client_test.go index e3ab734305..35c75b58ed 100644 --- a/pkg/ring/kv/consul/client_test.go +++ b/pkg/ring/kv/consul/client_test.go @@ -28,7 +28,7 @@ func writeValuesToKV(t *testing.T, client *Client, key string, start, end int, s defer close(ch) for i := start; i <= end; i++ { t.Log("ts", time.Now(), "msg", "writing value", "val", i) - _, _ = client.kv.Put(&consul.KVPair{Key: key, Value: []byte(fmt.Sprintf("%d", i))}, nil) + _, _ = client.kv.Put(&consul.KVPair{Key: key, Value: fmt.Appendf(nil, "%d", i)}, nil) time.Sleep(sleep) } }() @@ -181,7 +181,7 @@ func TestReset(t *testing.T) { defer close(ch) for i := 0; i <= max; i++ { t.Log("ts", time.Now(), "msg", "writing value", "val", i) - _, _ = c.kv.Put(&consul.KVPair{Key: key, Value: []byte(fmt.Sprintf("%d", i))}, nil) + _, _ = c.kv.Put(&consul.KVPair{Key: key, Value: fmt.Appendf(nil, "%d", i)}, nil) if i == 1 { c.kv.(*mockKV).ResetIndex() } @@ -214,7 +214,7 @@ func observeValueForSomeTime(t *testing.T, client *Client, key string, timeout t observed := []string(nil) ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() - client.WatchKey(ctx, key, func(i interface{}) bool { + client.WatchKey(ctx, key, func(i any) bool { s, ok := i.(string) if !ok { return false @@ -248,7 +248,7 @@ func TestWatchKeyWithNoStartValue(t *testing.T) { defer fn() reported := 0 - c.WatchKey(ctx, key, func(i interface{}) bool { + c.WatchKey(ctx, key, func(i any) bool { reported++ return reported != 2 }) @@ -260,6 +260,6 @@ func TestWatchKeyWithNoStartValue(t *testing.T) { type testLogger struct { } -func (l testLogger) Log(keyvals ...interface{}) error { +func (l testLogger) Log(keyvals ...any) error { return nil } diff --git a/pkg/ring/kv/dynamodb/client.go b/pkg/ring/kv/dynamodb/client.go index 0fb53294d1..9c3e45b65b 100644 --- a/pkg/ring/kv/dynamodb/client.go +++ b/pkg/ring/kv/dynamodb/client.go @@ -98,7 +98,7 @@ func (c *Client) List(ctx context.Context, key string) ([]string, error) { return resp, err } -func (c *Client) Get(ctx context.Context, key string) (interface{}, error) { +func (c *Client) Get(ctx context.Context, key string) (any, error) { resp, _, err := c.kv.Query(ctx, dynamodbKey{primaryKey: key}, false) if err != nil { level.Warn(c.logger).Log("msg", "error Get", "key", key, "err", err) @@ -135,7 +135,7 @@ func (c *Client) Delete(ctx context.Context, key string) error { return err } -func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error { +func (c *Client) CAS(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error { bo := backoff.New(ctx, c.backoffConfig) for bo.Ongoing() { c.ddbMetrics.dynamodbCasAttempts.Inc() @@ -185,9 +185,16 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou continue } - putRequests := map[dynamodbKey][]byte{} + 
putRequests := map[dynamodbKey]dynamodbItem{} for childKey, bytes := range buf { - putRequests[dynamodbKey{primaryKey: key, sortKey: childKey}] = bytes + version := int64(0) + if ddbItem, ok := resp[childKey]; ok { + version = ddbItem.version + } + putRequests[dynamodbKey{primaryKey: key, sortKey: childKey}] = dynamodbItem{ + data: bytes, + version: version, + } } deleteRequests := make([]dynamodbKey, 0, len(toDelete)) @@ -196,9 +203,13 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou } if len(putRequests) > 0 || len(deleteRequests) > 0 { - err = c.kv.Batch(ctx, putRequests, deleteRequests) + retry, err := c.kv.Batch(ctx, putRequests, deleteRequests) if err != nil { - return err + if !retry { + return err + } + bo.Wait() + continue } c.updateStaleData(key, r, time.Now().UTC()) return nil @@ -218,7 +229,7 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou return err } -func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) bool) { +func (c *Client) WatchKey(ctx context.Context, key string, f func(any) bool) { bo := backoff.New(ctx, c.backoffConfig) for bo.Ongoing() { @@ -260,7 +271,7 @@ func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) b } } -func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) { +func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, any) bool) { bo := backoff.New(ctx, c.backoffConfig) for bo.Ongoing() { @@ -273,8 +284,8 @@ func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, continue } - for key, bytes := range out { - decoded, err := c.codec.Decode(bytes) + for key, ddbItem := range out { + decoded, err := c.codec.Decode(ddbItem.data) if err != nil { level.Error(c.logger).Log("msg", "error decoding key", "key", key, "err", err) continue @@ -293,8 +304,12 @@ func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, } } -func (c *Client) decodeMultikey(data map[string][]byte) (codec.MultiKey, error) { - res, err := c.codec.DecodeMultiKey(data) +func (c *Client) decodeMultikey(data map[string]dynamodbItem) (codec.MultiKey, error) { + multiKeyData := make(map[string][]byte, len(data)) + for key, ddbItem := range data { + multiKeyData[key] = ddbItem.data + } + res, err := c.codec.DecodeMultiKey(multiKeyData) if err != nil { return nil, err } diff --git a/pkg/ring/kv/dynamodb/client_test.go b/pkg/ring/kv/dynamodb/client_test.go index 7cefe64f75..6885998d69 100644 --- a/pkg/ring/kv/dynamodb/client_test.go +++ b/pkg/ring/kv/dynamodb/client_test.go @@ -34,11 +34,11 @@ func Test_CAS_ErrorNoRetry(t *testing.T) { c := NewClientMock(ddbMock, codecMock, TestLogger{}, prometheus.NewPedanticRegistry(), defaultPullTime, defaultBackoff) expectedErr := errors.Errorf("test") - ddbMock.On("Query").Return(map[string][]byte{}, nil).Once() + ddbMock.On("Query").Return(map[string]dynamodbItem{}, nil).Once() codecMock.On("DecodeMultiKey").Return(descMock, nil).Twice() descMock.On("Clone").Return(descMock).Once() - err := c.CAS(context.TODO(), key, func(in interface{}) (out interface{}, retry bool, err error) { + err := c.CAS(context.TODO(), key, func(in any) (out any, retry bool, err error) { return nil, false, expectedErr }) @@ -46,25 +46,60 @@ func Test_CAS_ErrorNoRetry(t *testing.T) { } func Test_CAS_Backoff(t *testing.T) { - ddbMock := NewDynamodbClientMock() - codecMock := &CodecMock{} - descMock := &DescMock{} - c := NewClientMock(ddbMock, codecMock, 
TestLogger{}, prometheus.NewPedanticRegistry(), defaultPullTime, defaultBackoff) - expectedErr := errors.Errorf("test") + testCases := []struct { + name string + setupMocks func(*MockDynamodbClient, *CodecMock, *DescMock, map[dynamodbKey]dynamodbItem, []dynamodbKey) + expectedQueryCalls int + expectedBatchCalls int + }{ + { + name: "query_fails_and_backs_off", + setupMocks: func(ddbMock *MockDynamodbClient, codecMock *CodecMock, descMock *DescMock, expectedBatch map[dynamodbKey]dynamodbItem, expectedDelete []dynamodbKey) { + ddbMock.On("Query").Return(map[string]dynamodbItem{}, errors.Errorf("query failed")).Once() + ddbMock.On("Query").Return(map[string]dynamodbItem{}, nil).Once() + ddbMock.On("Batch", context.TODO(), expectedBatch, expectedDelete).Return(false, nil).Once() + }, + expectedQueryCalls: 2, + expectedBatchCalls: 1, + }, + { + name: "batch_fails_and_backs_off", + setupMocks: func(ddbMock *MockDynamodbClient, codecMock *CodecMock, descMock *DescMock, expectedBatch map[dynamodbKey]dynamodbItem, expectedDelete []dynamodbKey) { + ddbMock.On("Query").Return(map[string]dynamodbItem{}, nil).Twice() + ddbMock.On("Batch", context.TODO(), expectedBatch, expectedDelete).Return(true, errors.Errorf("batch failed")).Once() + ddbMock.On("Batch", context.TODO(), expectedBatch, expectedDelete).Return(false, nil).Once() + }, + expectedQueryCalls: 2, + expectedBatchCalls: 2, + }, + } - ddbMock.On("Query").Return(map[string][]byte{}, expectedErr).Once() - ddbMock.On("Query").Return(map[string][]byte{}, nil).Once() - ddbMock.On("Batch", context.TODO(), map[dynamodbKey][]byte{}, []dynamodbKey{{primaryKey: "test", sortKey: "childkey"}}).Once() - codecMock.On("DecodeMultiKey").Return(descMock, nil).Twice() - descMock.On("Clone").Return(descMock).Once() - descMock.On("FindDifference", descMock).Return(descMock, []string{"childkey"}, nil).Once() - codecMock.On("EncodeMultiKey").Return(map[string][]byte{}, nil).Twice() + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ddbMock := NewDynamodbClientMock() + codecMock := &CodecMock{} + descMock := &DescMock{} + c := NewClientMock(ddbMock, codecMock, TestLogger{}, prometheus.NewPedanticRegistry(), defaultPullTime, defaultBackoff) - err := c.CAS(context.TODO(), key, func(in interface{}) (out interface{}, retry bool, err error) { - return descMock, true, nil - }) + expectedBatch := map[dynamodbKey]dynamodbItem{} + expectedDelete := []dynamodbKey{{primaryKey: "test", sortKey: "childkey"}} - require.NoError(t, err) + tc.setupMocks(ddbMock, codecMock, descMock, expectedBatch, expectedDelete) + + codecMock.On("DecodeMultiKey").Return(descMock, nil).Times(tc.expectedQueryCalls) + descMock.On("Clone").Return(descMock).Times(tc.expectedQueryCalls) + descMock.On("FindDifference", descMock).Return(descMock, []string{"childkey"}, nil).Times(tc.expectedBatchCalls) + codecMock.On("EncodeMultiKey").Return(map[string][]byte{}, nil).Times(tc.expectedBatchCalls) + + err := c.CAS(context.TODO(), key, func(in any) (out any, retry bool, err error) { + return descMock, true, nil + }) + + require.NoError(t, err) + ddbMock.AssertNumberOfCalls(t, "Query", tc.expectedQueryCalls) + ddbMock.AssertNumberOfCalls(t, "Batch", tc.expectedBatchCalls) + }) + } } func Test_CAS_Failed(t *testing.T) { @@ -78,9 +113,9 @@ func Test_CAS_Failed(t *testing.T) { descMock := &DescMock{} c := NewClientMock(ddbMock, codecMock, TestLogger{}, prometheus.NewPedanticRegistry(), defaultPullTime, config) - ddbMock.On("Query").Return(map[string][]byte{}, errors.Errorf("test")) + 
ddbMock.On("Query").Return(map[string]dynamodbItem{}, errors.Errorf("test")) - err := c.CAS(context.TODO(), key, func(in interface{}) (out interface{}, retry bool, err error) { + err := c.CAS(context.TODO(), key, func(in any) (out any, retry bool, err error) { return descMock, true, nil }) @@ -98,19 +133,19 @@ func Test_CAS_Update(t *testing.T) { expectedUpdatedKeys[0]: []byte(expectedUpdatedKeys[0]), expectedUpdatedKeys[1]: []byte(expectedUpdatedKeys[1]), } - expectedBatch := map[dynamodbKey][]byte{ - {primaryKey: key, sortKey: expectedUpdatedKeys[0]}: []byte(expectedUpdatedKeys[0]), - {primaryKey: key, sortKey: expectedUpdatedKeys[1]}: []byte(expectedUpdatedKeys[1]), + expectedBatch := map[dynamodbKey]dynamodbItem{ + {primaryKey: key, sortKey: expectedUpdatedKeys[0]}: {data: []byte(expectedUpdatedKeys[0])}, + {primaryKey: key, sortKey: expectedUpdatedKeys[1]}: {data: []byte(expectedUpdatedKeys[1])}, } - ddbMock.On("Query").Return(map[string][]byte{}, nil).Once() + ddbMock.On("Query").Return(map[string]dynamodbItem{}, nil).Once() codecMock.On("DecodeMultiKey").Return(descMock, nil).Once() descMock.On("Clone").Return(descMock).Once() descMock.On("FindDifference", descMock).Return(descMock, []string{}, nil).Once() codecMock.On("EncodeMultiKey").Return(expectedUpdated, nil).Once() - ddbMock.On("Batch", context.TODO(), expectedBatch, []dynamodbKey{}).Once() + ddbMock.On("Batch", context.TODO(), expectedBatch, []dynamodbKey{}).Return(false, nil).Once() - err := c.CAS(context.TODO(), key, func(in interface{}) (out interface{}, retry bool, err error) { + err := c.CAS(context.TODO(), key, func(in any) (out any, retry bool, err error) { return descMock, true, nil }) @@ -130,20 +165,20 @@ func Test_CAS_Delete(t *testing.T) { {primaryKey: key, sortKey: expectedToDelete[1]}, } - ddbMock.On("Query").Return(map[string][]byte{}, nil).Once() + ddbMock.On("Query").Return(map[string]dynamodbItem{}, nil).Once() codecMock.On("DecodeMultiKey").Return(descMock, nil).Once() descMock.On("Clone").Return(descMock).Once() descMock.On("FindDifference", descMock).Return(descMock, expectedToDelete, nil).Once() codecMock.On("EncodeMultiKey").Return(map[string][]byte{}, nil).Once() - ddbMock.On("Batch", context.TODO(), map[dynamodbKey][]byte{}, expectedBatch) + ddbMock.On("Batch", context.TODO(), map[dynamodbKey]dynamodbItem{}, expectedBatch).Return(false, nil).Once() - err := c.CAS(context.TODO(), key, func(in interface{}) (out interface{}, retry bool, err error) { + err := c.CAS(context.TODO(), key, func(in any) (out any, retry bool, err error) { return descMock, true, nil }) require.NoError(t, err) ddbMock.AssertNumberOfCalls(t, "Batch", 1) - ddbMock.AssertCalled(t, "Batch", context.TODO(), map[dynamodbKey][]byte{}, expectedBatch) + ddbMock.AssertCalled(t, "Batch", context.TODO(), map[dynamodbKey]dynamodbItem{}, expectedBatch) } func Test_CAS_Update_Delete(t *testing.T) { @@ -156,9 +191,9 @@ func Test_CAS_Update_Delete(t *testing.T) { expectedUpdatedKeys[0]: []byte(expectedUpdatedKeys[0]), expectedUpdatedKeys[1]: []byte(expectedUpdatedKeys[1]), } - expectedUpdateBatch := map[dynamodbKey][]byte{ - {primaryKey: key, sortKey: expectedUpdatedKeys[0]}: []byte(expectedUpdatedKeys[0]), - {primaryKey: key, sortKey: expectedUpdatedKeys[1]}: []byte(expectedUpdatedKeys[1]), + expectedUpdateBatch := map[dynamodbKey]dynamodbItem{ + {primaryKey: key, sortKey: expectedUpdatedKeys[0]}: {data: []byte(expectedUpdatedKeys[0])}, + {primaryKey: key, sortKey: expectedUpdatedKeys[1]}: {data: []byte(expectedUpdatedKeys[1])}, } 
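
// The per-key version carried by dynamodbItem in these expectations is what
// dynamodb.go (further below) turns into a conditional write; BatchWriteItem
// cannot express conditions, which is why the diff moves to
// TransactWriteItems. A standalone aws-sdk-go-v2 sketch of that optimistic
// lock (the attribute names mirror this diff; the helper itself is
// illustrative, not part of the client):
package sketch

import (
	"context"
	"strconv"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

// conditionalPut succeeds only if no concurrent writer has bumped "version"
// since readVersion was observed.
func conditionalPut(ctx context.Context, ddb *dynamodb.Client, table, pk, sk string, data []byte, readVersion int64) error {
	_, err := ddb.TransactWriteItems(ctx, &dynamodb.TransactWriteItemsInput{
		TransactItems: []types.TransactWriteItem{{
			Put: &types.Put{
				TableName: aws.String(table),
				Item: map[string]types.AttributeValue{
					"RingKey":     &types.AttributeValueMemberS{Value: pk},
					"InstanceKey": &types.AttributeValueMemberS{Value: sk},
					"Data":        &types.AttributeValueMemberB{Value: data},
					// store readVersion+1 so the next writer must have seen this write
					"version": &types.AttributeValueMemberN{Value: strconv.FormatInt(readVersion+1, 10)},
				},
				ConditionExpression: aws.String("attribute_not_exists(version) OR version = :v"),
				ExpressionAttributeValues: map[string]types.AttributeValue{
					":v": &types.AttributeValueMemberN{Value: strconv.FormatInt(readVersion, 10)},
				},
			},
		}},
	})
	return err
}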
expectedToDelete := []string{"test", "test2"} expectedDeleteBatch := []dynamodbKey{ @@ -166,14 +201,14 @@ func Test_CAS_Update_Delete(t *testing.T) { {primaryKey: key, sortKey: expectedToDelete[1]}, } - ddbMock.On("Query").Return(map[string][]byte{}, nil).Once() + ddbMock.On("Query").Return(map[string]dynamodbItem{}, nil).Once() codecMock.On("DecodeMultiKey").Return(descMock, nil).Once() descMock.On("Clone").Return(descMock).Once() descMock.On("FindDifference", descMock).Return(descMock, expectedToDelete, nil).Once() codecMock.On("EncodeMultiKey").Return(expectedUpdated, nil).Once() - ddbMock.On("Batch", context.TODO(), expectedUpdateBatch, expectedDeleteBatch) + ddbMock.On("Batch", context.TODO(), expectedUpdateBatch, expectedDeleteBatch).Return(false, nil).Once() - err := c.CAS(context.TODO(), key, func(in interface{}) (out interface{}, retry bool, err error) { + err := c.CAS(context.TODO(), key, func(in any) (out any, retry bool, err error) { return descMock, true, nil }) @@ -189,10 +224,10 @@ func Test_WatchKey(t *testing.T) { c := NewClientMock(ddbMock, codecMock, TestLogger{}, prometheus.NewPedanticRegistry(), 1*time.Second, defaultBackoff) timesCalled := 0 - ddbMock.On("Query").Return(map[string][]byte{}, nil) + ddbMock.On("Query").Return(map[string]dynamodbItem{}, nil) codecMock.On("DecodeMultiKey").Return(descMock, nil) - c.WatchKey(context.TODO(), key, func(i interface{}) bool { + c.WatchKey(context.TODO(), key, func(i any) bool { timesCalled++ ddbMock.AssertNumberOfCalls(t, "Query", timesCalled) codecMock.AssertNumberOfCalls(t, "DecodeMultiKey", timesCalled) @@ -207,20 +242,20 @@ func Test_WatchKey_UpdateStale(t *testing.T) { c := NewClientMock(ddbMock, codecMock, TestLogger{}, prometheus.NewPedanticRegistry(), defaultPullTime, defaultBackoff) staleData := &DescMock{} - ddbMock.On("Query").Return(map[string][]byte{}, nil).Once() + ddbMock.On("Query").Return(map[string]dynamodbItem{}, nil).Once() codecMock.On("DecodeMultiKey").Return(staleData, nil) - c.WatchKey(context.TODO(), key, func(i interface{}) bool { + c.WatchKey(context.TODO(), key, func(i any) bool { ddbMock.AssertNumberOfCalls(t, "Query", 1) codecMock.AssertNumberOfCalls(t, "DecodeMultiKey", 1) require.EqualValues(t, staleData, i) return false }) - ddbMock.On("Query").Return(map[string][]byte{}, errors.Errorf("failed")) + ddbMock.On("Query").Return(map[string]dynamodbItem{}, errors.Errorf("failed")) staleData.On("Clone").Return(staleData).Once() - c.WatchKey(context.TODO(), key, func(i interface{}) bool { + c.WatchKey(context.TODO(), key, func(i any) bool { ddbMock.AssertNumberOfCalls(t, "Query", 12) codecMock.AssertNumberOfCalls(t, "DecodeMultiKey", 1) require.EqualValues(t, staleData, i) @@ -241,19 +276,19 @@ func Test_CAS_UpdateStale(t *testing.T) { expectedUpdatedKeys[0]: []byte(expectedUpdatedKeys[0]), expectedUpdatedKeys[1]: []byte(expectedUpdatedKeys[1]), } - expectedBatch := map[dynamodbKey][]byte{ - {primaryKey: key, sortKey: expectedUpdatedKeys[0]}: []byte(expectedUpdatedKeys[0]), - {primaryKey: key, sortKey: expectedUpdatedKeys[1]}: []byte(expectedUpdatedKeys[1]), + expectedBatch := map[dynamodbKey]dynamodbItem{ + {primaryKey: key, sortKey: expectedUpdatedKeys[0]}: {data: []byte(expectedUpdatedKeys[0])}, + {primaryKey: key, sortKey: expectedUpdatedKeys[1]}: {data: []byte(expectedUpdatedKeys[1])}, } - ddbMock.On("Query").Return(map[string][]byte{}, nil).Once() + ddbMock.On("Query").Return(map[string]dynamodbItem{}, nil).Once() codecMock.On("DecodeMultiKey").Return(descMock, nil).Once() 
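
// Two v2-SDK error-handling details that matter for the dynamodb.go changes
// further below (a hedged aside, not code from this PR): a failed condition
// check surfaces as *types.ConditionalCheckFailedException, which errors.As
// matches even through wrapping, and on error the v2 operation methods return
// a nil output, so consumed capacity is best read defensively.
package sketch

import (
	"errors"

	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

// isConditionalCheckFailed reports whether err is the retryable optimistic-lock conflict.
func isConditionalCheckFailed(err error) bool {
	var ccf *types.ConditionalCheckFailedException
	return errors.As(err, &ccf)
}

// putCapacity reads consumed capacity without dereferencing a nil output.
func putCapacity(out *dynamodb.PutItemOutput, err error) float64 {
	if err != nil || out == nil || out.ConsumedCapacity == nil || out.ConsumedCapacity.CapacityUnits == nil {
		return 0
	}
	return *out.ConsumedCapacity.CapacityUnits
}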
descMock.On("Clone").Return(descMock).Once() descMock.On("FindDifference", descMockResult).Return(descMockResult, []string{}, nil).Once() codecMock.On("EncodeMultiKey").Return(expectedUpdated, nil).Once() - ddbMock.On("Batch", context.TODO(), expectedBatch, []dynamodbKey{}).Once() + ddbMock.On("Batch", context.TODO(), expectedBatch, []dynamodbKey{}).Return(false, nil).Once() - err := c.CAS(context.TODO(), key, func(in interface{}) (out interface{}, retry bool, err error) { + err := c.CAS(context.TODO(), key, func(in any) (out any, retry bool, err error) { return descMockResult, true, nil }) @@ -266,17 +301,17 @@ func Test_WatchPrefix(t *testing.T) { ddbMock := NewDynamodbClientMock() codecMock := &CodecMock{} c := NewClientMock(ddbMock, codecMock, TestLogger{}, prometheus.NewPedanticRegistry(), defaultPullTime, defaultBackoff) - data := map[string][]byte{} + data := map[string]dynamodbItem{} dataKey := []string{"t1", "t2"} - data[dataKey[0]] = []byte(dataKey[0]) - data[dataKey[1]] = []byte(dataKey[1]) + data[dataKey[0]] = dynamodbItem{data: []byte(dataKey[0])} + data[dataKey[1]] = dynamodbItem{data: []byte(dataKey[1])} calls := 0 ddbMock.On("Query").Return(data, nil) codecMock.On("Decode").Twice() - c.WatchPrefix(context.TODO(), key, func(key string, i interface{}) bool { - require.EqualValues(t, string(data[key]), i) + c.WatchPrefix(context.TODO(), key, func(key string, i any) bool { + require.EqualValues(t, string(data[key].data), i) delete(data, key) calls++ return calls < 2 @@ -321,7 +356,7 @@ func Test_DynamodbKVWithTimeout(t *testing.T) { err = dbWithTimeout.Put(ctx, dynamodbKey{primaryKey: key}, []byte{}) require.True(t, errors.Is(err, context.DeadlineExceeded)) - err = dbWithTimeout.Batch(ctx, nil, nil) + _, err = dbWithTimeout.Batch(ctx, nil, nil) require.True(t, errors.Is(err, context.DeadlineExceeded)) } @@ -358,13 +393,13 @@ func (m *MockDynamodbClient) List(context.Context, dynamodbKey) ([]string, float } return args.Get(0).([]string), 0, err } -func (m *MockDynamodbClient) Query(context.Context, dynamodbKey, bool) (map[string][]byte, float64, error) { +func (m *MockDynamodbClient) Query(context.Context, dynamodbKey, bool) (map[string]dynamodbItem, float64, error) { args := m.Called() var err error if args.Get(1) != nil { err = args.Get(1).(error) } - return args.Get(0).(map[string][]byte), 0, err + return args.Get(0).(map[string]dynamodbItem), 0, err } func (m *MockDynamodbClient) Delete(ctx context.Context, key dynamodbKey) error { m.Called(ctx, key) @@ -374,15 +409,19 @@ func (m *MockDynamodbClient) Put(ctx context.Context, key dynamodbKey, data []by m.Called(ctx, key, data) return nil } -func (m *MockDynamodbClient) Batch(ctx context.Context, put map[dynamodbKey][]byte, delete []dynamodbKey) error { - m.Called(ctx, put, delete) - return nil +func (m *MockDynamodbClient) Batch(ctx context.Context, put map[dynamodbKey]dynamodbItem, delete []dynamodbKey) (bool, error) { + args := m.Called(ctx, put, delete) + var err error + if args.Get(1) != nil { + err = args.Get(1).(error) + } + return args.Get(0).(bool), err } type TestLogger struct { } -func (l TestLogger) Log(...interface{}) error { +func (l TestLogger) Log(...any) error { return nil } @@ -396,23 +435,23 @@ func (*CodecMock) CodecID() string { } // Decode implements Codec. -func (m *CodecMock) Decode(bytes []byte) (interface{}, error) { +func (m *CodecMock) Decode(bytes []byte) (any, error) { m.Called() return string(bytes), nil } // Encode implements Codec. 
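
// For reference, the Codec contract these mocks stand in for is a plain
// bytes-to-value mapping. A minimal sketch mirroring the String codec earlier
// in this diff (error handling elided for brevity):
package sketch

import "errors"

type stringCodec struct{}

func (stringCodec) CodecID() string { return "stringCodec" }

func (stringCodec) Encode(v any) ([]byte, error) { return []byte(v.(string)), nil }

func (stringCodec) Decode(b []byte) (any, error) { return string(b), nil }

// Multi-key operations are optional; single-value codecs simply refuse them.
func (stringCodec) EncodeMultiKey(any) (map[string][]byte, error) {
	return nil, errors.New("stringCodec does not support EncodeMultiKey")
}

func (stringCodec) DecodeMultiKey(map[string][]byte) (any, error) {
	return nil, errors.New("stringCodec does not support DecodeMultiKey")
}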
-func (m *CodecMock) Encode(i interface{}) ([]byte, error) { +func (m *CodecMock) Encode(i any) ([]byte, error) { m.Called() return []byte(i.(string)), nil } -func (m *CodecMock) EncodeMultiKey(interface{}) (map[string][]byte, error) { +func (m *CodecMock) EncodeMultiKey(any) (map[string][]byte, error) { args := m.Called() return args.Get(0).(map[string][]byte), nil } -func (m *CodecMock) DecodeMultiKey(map[string][]byte) (interface{}, error) { +func (m *CodecMock) DecodeMultiKey(map[string][]byte) (any, error) { args := m.Called() var err error if args.Get(1) != nil { @@ -425,17 +464,17 @@ type DescMock struct { mock.Mock } -func (m *DescMock) Clone() interface{} { +func (m *DescMock) Clone() any { args := m.Called() return args.Get(0) } -func (m *DescMock) SplitByID() map[string]interface{} { +func (m *DescMock) SplitByID() map[string]any { args := m.Called() - return args.Get(0).(map[string]interface{}) + return args.Get(0).(map[string]any) } -func (m *DescMock) JoinIds(map[string]interface{}) { +func (m *DescMock) JoinIds(map[string]any) { m.Called() } @@ -444,7 +483,7 @@ func (m *DescMock) GetItemFactory() proto.Message { return args.Get(0).(proto.Message) } -func (m *DescMock) FindDifference(that codec.MultiKey) (interface{}, []string, error) { +func (m *DescMock) FindDifference(that codec.MultiKey) (any, []string, error) { args := m.Called(that) var err error if args.Get(2) != nil { @@ -471,7 +510,7 @@ func (d *dynamodbKVWithDelayAndContextCheck) List(ctx context.Context, key dynam } } -func (d *dynamodbKVWithDelayAndContextCheck) Query(ctx context.Context, key dynamodbKey, isPrefix bool) (map[string][]byte, float64, error) { +func (d *dynamodbKVWithDelayAndContextCheck) Query(ctx context.Context, key dynamodbKey, isPrefix bool) (map[string]dynamodbItem, float64, error) { select { case <-ctx.Done(): return nil, 0, ctx.Err() @@ -498,10 +537,10 @@ func (d *dynamodbKVWithDelayAndContextCheck) Put(ctx context.Context, key dynamo } } -func (d *dynamodbKVWithDelayAndContextCheck) Batch(ctx context.Context, put map[dynamodbKey][]byte, delete []dynamodbKey) error { +func (d *dynamodbKVWithDelayAndContextCheck) Batch(ctx context.Context, put map[dynamodbKey]dynamodbItem, delete []dynamodbKey) (bool, error) { select { case <-ctx.Done(): - return ctx.Err() + return false, ctx.Err() case <-time.After(d.delay): return d.ddbClient.Batch(ctx, put, delete) } diff --git a/pkg/ring/kv/dynamodb/dynamodb.go b/pkg/ring/kv/dynamodb/dynamodb.go index 2dc4769d6e..9bc4e99e30 100644 --- a/pkg/ring/kv/dynamodb/dynamodb.go +++ b/pkg/ring/kv/dynamodb/dynamodb.go @@ -2,15 +2,16 @@ package dynamodb import ( "context" + "errors" "fmt" "math" "strconv" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" "github.com/go-kit/log" ) @@ -27,24 +28,37 @@ type dynamodbKey struct { type dynamoDbClient interface { List(ctx context.Context, key dynamodbKey) ([]string, float64, error) - Query(ctx context.Context, key dynamodbKey, isPrefix bool) (map[string][]byte, float64, error) + Query(ctx context.Context, key dynamodbKey, isPrefix bool) (map[string]dynamodbItem, float64, error) Delete(ctx context.Context, key dynamodbKey) error Put(ctx context.Context, key dynamodbKey, data []byte) error - Batch(ctx 
context.Context, put map[dynamodbKey][]byte, delete []dynamodbKey) error + Batch(ctx context.Context, put map[dynamodbKey]dynamodbItem, delete []dynamodbKey) (bool, error) +} + +type dynamoDBAPI interface { + dynamodb.QueryAPIClient + DeleteItem(ctx context.Context, params *dynamodb.DeleteItemInput, optFns ...func(*dynamodb.Options)) (*dynamodb.DeleteItemOutput, error) + PutItem(ctx context.Context, params *dynamodb.PutItemInput, optFns ...func(*dynamodb.Options)) (*dynamodb.PutItemOutput, error) + TransactWriteItems(ctx context.Context, params *dynamodb.TransactWriteItemsInput, optFns ...func(*dynamodb.Options)) (*dynamodb.TransactWriteItemsOutput, error) } type dynamodbKV struct { - ddbClient dynamodbiface.DynamoDBAPI + ddbClient dynamoDBAPI logger log.Logger tableName *string ttlValue time.Duration } +type dynamodbItem struct { + data []byte + version int64 +} + var ( primaryKey = "RingKey" sortKey = "InstanceKey" contentData = "Data" timeToLive = "ttl" + version = "version" ) func newDynamodbKV(cfg Config, logger log.Logger) (dynamodbKV, error) { @@ -52,37 +66,37 @@ func newDynamodbKV(cfg Config, logger log.Logger) (dynamodbKV, error) { return dynamodbKV{}, err } - sess, err := session.NewSession() - if err != nil { - return dynamodbKV{}, err - } + awsConfig := []func(*config.LoadOptions) error{} - awsCfg := aws.NewConfig() if len(cfg.Region) > 0 { - awsCfg = awsCfg.WithRegion(cfg.Region) + awsConfig = append(awsConfig, config.WithRegion(cfg.Region)) + } + + awsCfg, err := config.LoadDefaultConfig( + context.Background(), + awsConfig..., + ) + if err != nil { + return dynamodbKV{}, err } - dynamoDB := dynamodb.New(sess, awsCfg) + dynamoDB := dynamodb.NewFromConfig(awsCfg) - ddbKV := &dynamodbKV{ + return dynamodbKV{ ddbClient: dynamoDB, logger: logger, tableName: aws.String(cfg.TableName), ttlValue: cfg.TTL, - } - - return *ddbKV, nil + }, nil } func validateConfigInput(cfg Config) error { if len(cfg.TableName) < 3 { return fmt.Errorf("invalid dynamodb table name: %s", cfg.TableName) } - return nil } -// for testing func (kv dynamodbKV) getTTL() time.Duration { return kv.ttlValue } @@ -90,67 +104,90 @@ func (kv dynamodbKV) getTTL() time.Duration { func (kv dynamodbKV) List(ctx context.Context, key dynamodbKey) ([]string, float64, error) { var keys []string var totalCapacity float64 + input := &dynamodb.QueryInput{ TableName: kv.tableName, - ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal), - KeyConditions: map[string]*dynamodb.Condition{ + ReturnConsumedCapacity: types.ReturnConsumedCapacityTotal, + KeyConditions: map[string]types.Condition{ primaryKey: { - ComparisonOperator: aws.String("EQ"), - AttributeValueList: []*dynamodb.AttributeValue{ - { - S: aws.String(key.primaryKey), - }, + ComparisonOperator: types.ComparisonOperatorEq, + AttributeValueList: []types.AttributeValue{ + &types.AttributeValueMemberS{Value: key.primaryKey}, }, }, }, - AttributesToGet: []*string{aws.String(sortKey)}, + AttributesToGet: []string{sortKey}, } - err := kv.ddbClient.QueryPagesWithContext(ctx, input, func(output *dynamodb.QueryOutput, _ bool) bool { - totalCapacity += getCapacityUnits(output.ConsumedCapacity) - for _, item := range output.Items { - keys = append(keys, item[sortKey].String()) + paginator := dynamodb.NewQueryPaginator(kv.ddbClient, input) + + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + if err != nil { + return nil, totalCapacity, err + } + totalCapacity += getCapacityUnits(page.ConsumedCapacity) + for _, item := range page.Items { + 
if v, ok := item[sortKey].(*types.AttributeValueMemberS); ok { + keys = append(keys, v.Value) + } } - return true - }) - if err != nil { - return nil, totalCapacity, err } return keys, totalCapacity, nil } -func (kv dynamodbKV) Query(ctx context.Context, key dynamodbKey, isPrefix bool) (map[string][]byte, float64, error) { - keys := make(map[string][]byte) +func (kv dynamodbKV) Query(ctx context.Context, key dynamodbKey, isPrefix bool) (map[string]dynamodbItem, float64, error) { + keys := make(map[string]dynamodbItem) var totalCapacity float64 - co := dynamodb.ComparisonOperatorEq + + co := types.ComparisonOperatorEq if isPrefix { - co = dynamodb.ComparisonOperatorBeginsWith + co = types.ComparisonOperatorBeginsWith } + input := &dynamodb.QueryInput{ TableName: kv.tableName, - ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal), - KeyConditions: map[string]*dynamodb.Condition{ + ReturnConsumedCapacity: types.ReturnConsumedCapacityTotal, + KeyConditions: map[string]types.Condition{ primaryKey: { - ComparisonOperator: aws.String(co), - AttributeValueList: []*dynamodb.AttributeValue{ - { - S: aws.String(key.primaryKey), - }, + ComparisonOperator: co, + AttributeValueList: []types.AttributeValue{ + &types.AttributeValueMemberS{Value: key.primaryKey}, }, }, }, } - err := kv.ddbClient.QueryPagesWithContext(ctx, input, func(output *dynamodb.QueryOutput, _ bool) bool { - totalCapacity += getCapacityUnits(output.ConsumedCapacity) - for _, item := range output.Items { - keys[*item[sortKey].S] = item[contentData].B + paginator := dynamodb.NewQueryPaginator(kv.ddbClient, input) + + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + if err != nil { + return nil, totalCapacity, err + } + totalCapacity += getCapacityUnits(page.ConsumedCapacity) + + for _, item := range page.Items { + itemVersion := int64(0) + if v, ok := item[version].(*types.AttributeValueMemberN); ok { + parsedVersion, err := strconv.ParseInt(v.Value, 10, 0) + if err != nil { + kv.logger.Log("msg", "failed to parse item version", "version", v.Value, "err", err) + } else { + itemVersion = parsedVersion + } + } + + if d, ok := item[contentData].(*types.AttributeValueMemberB); ok { + if s, ok := item[sortKey].(*types.AttributeValueMemberS); ok { + keys[s.Value] = dynamodbItem{ + data: d.Value, + version: itemVersion, + } + } + } } - return true - }) - if err != nil { - return nil, totalCapacity, err } return keys, totalCapacity, nil @@ -159,51 +196,50 @@ func (kv dynamodbKV) Query(ctx context.Context, key dynamodbKey, isPrefix bool) func (kv dynamodbKV) Delete(ctx context.Context, key dynamodbKey) (float64, error) { input := &dynamodb.DeleteItemInput{ TableName: kv.tableName, - ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal), + ReturnConsumedCapacity: types.ReturnConsumedCapacityTotal, Key: generateItemKey(key), } - totalCapacity := float64(0) - output, err := kv.ddbClient.DeleteItemWithContext(ctx, input) - if err != nil { - totalCapacity = getCapacityUnits(output.ConsumedCapacity) - } + output, err := kv.ddbClient.DeleteItem(ctx, input) + totalCapacity := getCapacityUnits(output.ConsumedCapacity) return totalCapacity, err } func (kv dynamodbKV) Put(ctx context.Context, key dynamodbKey, data []byte) (float64, error) { input := &dynamodb.PutItemInput{ TableName: kv.tableName, - ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal), - Item: kv.generatePutItemRequest(key, data), - } - totalCapacity := float64(0) - output, err := 
kv.ddbClient.PutItemWithContext(ctx, input) - if err != nil { - totalCapacity = getCapacityUnits(output.ConsumedCapacity) + ReturnConsumedCapacity: types.ReturnConsumedCapacityTotal, + Item: kv.generatePutItemRequest(key, dynamodbItem{data: data}), } + output, err := kv.ddbClient.PutItem(ctx, input) + totalCapacity := getCapacityUnits(output.ConsumedCapacity) return totalCapacity, err } -func (kv dynamodbKV) Batch(ctx context.Context, put map[dynamodbKey][]byte, delete []dynamodbKey) (float64, error) { +func (kv dynamodbKV) Batch(ctx context.Context, put map[dynamodbKey]dynamodbItem, delete []dynamodbKey) (float64, bool, error) { totalCapacity := float64(0) writeRequestSize := len(put) + len(delete) if writeRequestSize == 0 { - return totalCapacity, nil + return totalCapacity, false, nil } - writeRequestsSlices := make([][]*dynamodb.WriteRequest, int(math.Ceil(float64(writeRequestSize)/float64(DdbBatchSizeLimit)))) - for i := 0; i < len(writeRequestsSlices); i++ { - writeRequestsSlices[i] = make([]*dynamodb.WriteRequest, 0, DdbBatchSizeLimit) + writeRequestsSlices := make([][]types.TransactWriteItem, int(math.Ceil(float64(writeRequestSize)/float64(DdbBatchSizeLimit)))) + for i := range writeRequestsSlices { + writeRequestsSlices[i] = make([]types.TransactWriteItem, 0, DdbBatchSizeLimit) } currIdx := 0 - for key, data := range put { - item := kv.generatePutItemRequest(key, data) - writeRequestsSlices[currIdx] = append(writeRequestsSlices[currIdx], &dynamodb.WriteRequest{ - PutRequest: &dynamodb.PutRequest{ - Item: item, + for key, ddbItem := range put { + item := kv.generatePutItemRequest(key, ddbItem) + ddbPut := &types.Put{ + TableName: kv.tableName, + Item: item, + ConditionExpression: aws.String("attribute_not_exists(version) OR version = :v"), + ExpressionAttributeValues: map[string]types.AttributeValue{ + ":v": &types.AttributeValueMemberN{Value: strconv.FormatInt(ddbItem.version, 10)}, }, - }) + } + + writeRequestsSlices[currIdx] = append(writeRequestsSlices[currIdx], types.TransactWriteItem{Put: ddbPut}) if len(writeRequestsSlices[currIdx]) == DdbBatchSizeLimit { currIdx++ } @@ -211,48 +247,47 @@ func (kv dynamodbKV) Batch(ctx context.Context, put map[dynamodbKey][]byte, dele for _, key := range delete { item := generateItemKey(key) - writeRequestsSlices[currIdx] = append(writeRequestsSlices[currIdx], &dynamodb.WriteRequest{ - DeleteRequest: &dynamodb.DeleteRequest{ - Key: item, - }, - }) + ddbDelete := &types.Delete{ + TableName: kv.tableName, + Key: item, + } + writeRequestsSlices[currIdx] = append(writeRequestsSlices[currIdx], types.TransactWriteItem{Delete: ddbDelete}) if len(writeRequestsSlices[currIdx]) == DdbBatchSizeLimit { currIdx++ } } for _, slice := range writeRequestsSlices { - input := &dynamodb.BatchWriteItemInput{ - ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal), - RequestItems: map[string][]*dynamodb.WriteRequest{ - *kv.tableName: slice, - }, + if len(slice) == 0 { + continue } - - resp, err := kv.ddbClient.BatchWriteItemWithContext(ctx, input) + resp, err := kv.ddbClient.TransactWriteItems(ctx, &dynamodb.TransactWriteItemsInput{ + TransactItems: slice, + }) if err != nil { - return totalCapacity, err + var checkFailed *types.ConditionalCheckFailedException + isCheckFailed := errors.As(err, &checkFailed) + if isCheckFailed { + kv.logger.Log("msg", "conditional check failed on DynamoDB Batch", "err", err) + } + return totalCapacity, isCheckFailed, err } for _, consumedCapacity := range resp.ConsumedCapacity { - totalCapacity += 
getCapacityUnits(consumedCapacity) - } - - if len(resp.UnprocessedItems) > 0 { - return totalCapacity, fmt.Errorf("error processing batch request for %s requests", resp.UnprocessedItems) + totalCapacity += getCapacityUnits(&consumedCapacity) } } - return totalCapacity, nil + return totalCapacity, false, nil } -func (kv dynamodbKV) generatePutItemRequest(key dynamodbKey, data []byte) map[string]*dynamodb.AttributeValue { +func (kv dynamodbKV) generatePutItemRequest(key dynamodbKey, ddbItem dynamodbItem) map[string]types.AttributeValue { item := generateItemKey(key) - item[contentData] = &dynamodb.AttributeValue{ - B: data, - } + item[contentData] = &types.AttributeValueMemberB{Value: ddbItem.data} + item[version] = &types.AttributeValueMemberN{Value: strconv.FormatInt(ddbItem.version+1, 10)} + if kv.getTTL() > 0 { - item[timeToLive] = &dynamodb.AttributeValue{ - N: aws.String(strconv.FormatInt(time.Now().UTC().Add(kv.getTTL()).Unix(), 10)), + item[timeToLive] = &types.AttributeValueMemberN{ + Value: strconv.FormatInt(time.Now().UTC().Add(kv.getTTL()).Unix(), 10), } } @@ -274,7 +309,7 @@ func (d *dynamodbKVWithTimeout) List(ctx context.Context, key dynamodbKey) ([]st return d.ddbClient.List(ctx, key) } -func (d *dynamodbKVWithTimeout) Query(ctx context.Context, key dynamodbKey, isPrefix bool) (map[string][]byte, float64, error) { +func (d *dynamodbKVWithTimeout) Query(ctx context.Context, key dynamodbKey, isPrefix bool) (map[string]dynamodbItem, float64, error) { ctx, cancel := context.WithTimeout(ctx, d.timeout) defer cancel() return d.ddbClient.Query(ctx, key, isPrefix) @@ -292,28 +327,23 @@ func (d *dynamodbKVWithTimeout) Put(ctx context.Context, key dynamodbKey, data [ return d.ddbClient.Put(ctx, key, data) } -func (d *dynamodbKVWithTimeout) Batch(ctx context.Context, put map[dynamodbKey][]byte, delete []dynamodbKey) error { +func (d *dynamodbKVWithTimeout) Batch(ctx context.Context, put map[dynamodbKey]dynamodbItem, delete []dynamodbKey) (bool, error) { ctx, cancel := context.WithTimeout(ctx, d.timeout) defer cancel() return d.ddbClient.Batch(ctx, put, delete) } -func generateItemKey(key dynamodbKey) map[string]*dynamodb.AttributeValue { - resp := map[string]*dynamodb.AttributeValue{ - primaryKey: { - S: aws.String(key.primaryKey), - }, +func generateItemKey(key dynamodbKey) map[string]types.AttributeValue { + resp := map[string]types.AttributeValue{ + primaryKey: &types.AttributeValueMemberS{Value: key.primaryKey}, } if len(key.sortKey) > 0 { - resp[sortKey] = &dynamodb.AttributeValue{ - S: aws.String(key.sortKey), - } + resp[sortKey] = &types.AttributeValueMemberS{Value: key.sortKey} } - return resp } -func getCapacityUnits(cap *dynamodb.ConsumedCapacity) float64 { +func getCapacityUnits(cap *types.ConsumedCapacity) float64 { if cap != nil && cap.CapacityUnits != nil { return *cap.CapacityUnits } diff --git a/pkg/ring/kv/dynamodb/dynamodb_test.go b/pkg/ring/kv/dynamodb/dynamodb_test.go index 7e253716d1..065e38c644 100644 --- a/pkg/ring/kv/dynamodb/dynamodb_test.go +++ b/pkg/ring/kv/dynamodb/dynamodb_test.go @@ -4,20 +4,22 @@ import ( "context" "fmt" "strconv" + "strings" "testing" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" "github.com/stretchr/testify/require" ) func Test_TTLDisabled(t *testing.T) { 
	ddbClientMock := &mockDynamodb{
 		putItem: func(input *dynamodb.PutItemInput) *dynamodb.PutItemOutput {
-			require.Nil(t, input.Item["ttl"])
+			// ttl must be absent
+			_, hasTTL := input.Item["ttl"]
+			require.False(t, hasTTL)
 			return &dynamodb.PutItemOutput{}
 		},
 	}
@@ -25,23 +27,28 @@
 	ddb := newDynamodbClientMock("TEST", ddbClientMock, 0)
 	_, err := ddb.Put(context.TODO(), dynamodbKey{primaryKey: "test", sortKey: "test1"}, []byte("TEST"))
 	require.NoError(t, err)
-
 }
 
 func Test_newDynamodbKV(t *testing.T) {
+	// Just ensures construction doesn't error (no API call happens here)
 	_, err := newDynamodbKV(Config{Region: "us-west-2", TableName: "TEST"}, TestLogger{})
-
 	require.NoError(t, err)
 }
 
 func Test_TTL(t *testing.T) {
 	ddbClientMock := &mockDynamodb{
 		putItem: func(input *dynamodb.PutItemInput) *dynamodb.PutItemOutput {
-			require.NotNil(t, input.Item["ttl"].N)
-			parsedTime, err := strconv.ParseInt(*input.Item["ttl"].N, 10, 64)
+			av, ok := input.Item["ttl"]
+			require.True(t, ok, "ttl attribute missing")
+			num, ok := av.(*types.AttributeValueMemberN)
+			require.True(t, ok, "ttl should be a number")
+
+			parsedTime, err := strconv.ParseInt(num.Value, 10, 64)
 			require.NoError(t, err)
-			require.Greater(t, time.Unix(parsedTime, 0), time.Now().UTC().Add(4*time.Hour), 10)
-			require.LessOrEqual(t, time.Unix(parsedTime, 0), time.Now().UTC().Add(6*time.Hour), 10)
+
+			ts := time.Unix(parsedTime, 0)
+			require.True(t, ts.After(time.Now().UTC().Add(4*time.Hour)))
+			require.True(t, !ts.After(time.Now().UTC().Add(6*time.Hour)))
 			return &dynamodb.PutItemOutput{}
 		},
 	}
@@ -61,24 +68,28 @@ func Test_Batch(t *testing.T) {
 		primaryKey: "PKDelete",
 		sortKey:    "SKDelete",
 	}
-	update := map[dynamodbKey][]byte{
-		ddbKeyUpdate: {},
+	update := map[dynamodbKey]dynamodbItem{
+		ddbKeyUpdate: {
+			data:    []byte{},
+			version: 0,
+		},
 	}
-	delete := []dynamodbKey{ddbKeyDelete}
+	toDelete := []dynamodbKey{ddbKeyDelete}
 
 	ddbClientMock := &mockDynamodb{
-		batchWriteItem: func(input *dynamodb.BatchWriteItemInput) (*dynamodb.BatchWriteItemOutput, error) {
-			require.NotNil(t, input.RequestItems[tableName])
-			require.EqualValues(t, 2, len(input.RequestItems[tableName]))
+		transactWriteItem: func(input *dynamodb.TransactWriteItemsInput) (*dynamodb.TransactWriteItemsOutput, error) {
+			require.NotNil(t, input.TransactItems)
+			require.EqualValues(t, 2, len(input.TransactItems))
 			require.True(t,
-				(checkPutRequestForItem(input.RequestItems[tableName][0], ddbKeyUpdate) || checkPutRequestForItem(input.RequestItems[tableName][1], ddbKeyUpdate)) &&
-					(checkDeleteRequestForItem(input.RequestItems[tableName][0], ddbKeyDelete) || checkDeleteRequestForItem(input.RequestItems[tableName][1], ddbKeyDelete)))
-			return &dynamodb.BatchWriteItemOutput{}, nil
+				(checkPutForItem(input.TransactItems[0].Put, ddbKeyUpdate)) &&
+					(checkPutForConditionalExpression(input.TransactItems[0].Put)) &&
+					(checkDeleteForItem(input.TransactItems[1].Delete, ddbKeyDelete)))
+			return &dynamodb.TransactWriteItemsOutput{}, nil
 		},
 	}
 
 	ddb := newDynamodbClientMock(tableName, ddbClientMock, 5*time.Hour)
-	_, err := ddb.Batch(context.TODO(), update, delete)
+	_, _, err := ddb.Batch(context.TODO(), update, toDelete)
 	require.NoError(t, err)
 }
 
@@ -90,9 +101,9 @@ func Test_BatchSlices(t *testing.T) {
 	}
 	numOfCalls := 0
 	ddbClientMock := &mockDynamodb{
-		batchWriteItem: func(input *dynamodb.BatchWriteItemInput) (*dynamodb.BatchWriteItemOutput, error) {
+		transactWriteItem: func(input *dynamodb.TransactWriteItemsInput) (*dynamodb.TransactWriteItemsOutput, error) {
numOfCalls++ - return &dynamodb.BatchWriteItemOutput{}, nil + return &dynamodb.TransactWriteItemsOutput{}, nil }, } ddb := newDynamodbClientMock(tableName, ddbClientMock, 5*time.Hour) @@ -102,7 +113,6 @@ func Test_BatchSlices(t *testing.T) { numOfExecutions int expectedCalls int }{ - // These tests follow each other (end state of KV in state is starting point in the next state). { name: "Test slice on lower bound", numOfExecutions: 24, @@ -121,18 +131,16 @@ func Test_BatchSlices(t *testing.T) { } { t.Run(tc.name, func(t *testing.T) { numOfCalls = 0 - delete := make([]dynamodbKey, 0, tc.numOfExecutions) + toDelete := make([]dynamodbKey, 0, tc.numOfExecutions) for i := 0; i < tc.numOfExecutions; i++ { - delete = append(delete, ddbKeyDelete) + toDelete = append(toDelete, ddbKeyDelete) } - _, err := ddb.Batch(context.TODO(), nil, delete) + _, _, err := ddb.Batch(context.TODO(), nil, toDelete) require.NoError(t, err) require.EqualValues(t, tc.expectedCalls, numOfCalls) - }) } - } func Test_EmptyBatch(t *testing.T) { @@ -140,92 +148,125 @@ func Test_EmptyBatch(t *testing.T) { ddbClientMock := &mockDynamodb{} ddb := newDynamodbClientMock(tableName, ddbClientMock, 5*time.Hour) - _, err := ddb.Batch(context.TODO(), nil, nil) + _, _, err := ddb.Batch(context.TODO(), nil, nil) require.NoError(t, err) } -func Test_Batch_UnprocessedItems(t *testing.T) { +func Test_Batch_Error(t *testing.T) { tableName := "TEST" - ddbKeyDelete := dynamodbKey{ - primaryKey: "PKDelete", - sortKey: "SKDelete", - } - delete := []dynamodbKey{ddbKeyDelete} - ddbClientMock := &mockDynamodb{ - batchWriteItem: func(input *dynamodb.BatchWriteItemInput) (*dynamodb.BatchWriteItemOutput, error) { - return &dynamodb.BatchWriteItemOutput{ - UnprocessedItems: map[string][]*dynamodb.WriteRequest{ - tableName: {&dynamodb.WriteRequest{ - PutRequest: &dynamodb.PutRequest{Item: generateItemKey(ddbKeyDelete)}}, - }, - }, - }, nil + testCases := []struct { + name string + mockError error + expectedRetry bool + }{ + { + name: "generic_error_no_retry", + mockError: fmt.Errorf("mocked error"), + expectedRetry: false, + }, + { + name: "conditional_check_failed_should_retry", + mockError: &types.ConditionalCheckFailedException{}, + expectedRetry: true, }, } - ddb := newDynamodbClientMock(tableName, ddbClientMock, 5*time.Hour) - _, err := ddb.Batch(context.TODO(), nil, delete) - require.Errorf(t, err, "error processing batch dynamodb") -} + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ddbClientMock := &mockDynamodb{ + transactWriteItem: func(input *dynamodb.TransactWriteItemsInput) (*dynamodb.TransactWriteItemsOutput, error) { + return nil, tc.mockError + }, + } -func Test_Batch_Error(t *testing.T) { - tableName := "TEST" - ddbKeyDelete := dynamodbKey{ - primaryKey: "PKDelete", - sortKey: "SKDelete", - } - delete := []dynamodbKey{ddbKeyDelete} + ddb := newDynamodbClientMock(tableName, ddbClientMock, 5*time.Hour) - ddbClientMock := &mockDynamodb{ - batchWriteItem: func(input *dynamodb.BatchWriteItemInput) (*dynamodb.BatchWriteItemOutput, error) { - return &dynamodb.BatchWriteItemOutput{}, fmt.Errorf("mocked error") - }, + toDelete := []dynamodbKey{{primaryKey: "PKDelete", sortKey: "SKDelete"}} + _, retry, err := ddb.Batch(context.TODO(), nil, toDelete) + + require.Error(t, err) + require.Equal(t, tc.expectedRetry, retry) + }) } +} - ddb := newDynamodbClientMock(tableName, ddbClientMock, 5*time.Hour) - _, err := ddb.Batch(context.TODO(), nil, delete) - require.Errorf(t, err, "mocked error") +func checkPutForItem(request 
*types.Put, key dynamodbKey) bool { + if request == nil || request.Item == nil { + return false + } + pk, ok := request.Item[primaryKey].(*types.AttributeValueMemberS) + if !ok { + return false + } + sk, ok := request.Item[sortKey].(*types.AttributeValueMemberS) + if !ok { + return false + } + return pk.Value == key.primaryKey && sk.Value == key.sortKey } -func checkPutRequestForItem(request *dynamodb.WriteRequest, key dynamodbKey) bool { - return request.PutRequest != nil && - request.PutRequest.Item[primaryKey] != nil && - request.PutRequest.Item[sortKey] != nil && - *request.PutRequest.Item[primaryKey].S == key.primaryKey && - *request.PutRequest.Item[sortKey].S == key.sortKey +func checkDeleteForItem(request *types.Delete, key dynamodbKey) bool { + if request == nil || request.Key == nil { + return false + } + pk, ok := request.Key[primaryKey].(*types.AttributeValueMemberS) + if !ok { + return false + } + sk, ok := request.Key[sortKey].(*types.AttributeValueMemberS) + if !ok { + return false + } + return pk.Value == key.primaryKey && sk.Value == key.sortKey } -func checkDeleteRequestForItem(request *dynamodb.WriteRequest, key dynamodbKey) bool { - return request.DeleteRequest != nil && - request.DeleteRequest.Key[primaryKey] != nil && - request.DeleteRequest.Key[sortKey] != nil && - *request.DeleteRequest.Key[primaryKey].S == key.primaryKey && - *request.DeleteRequest.Key[sortKey].S == key.sortKey +func checkPutForConditionalExpression(request *types.Put) bool { + return request != nil && + request.ConditionExpression != nil && + strings.Contains(*request.ConditionExpression, "version = :v") } -type mockDynamodb struct { - putItem func(input *dynamodb.PutItemInput) *dynamodb.PutItemOutput - batchWriteItem func(input *dynamodb.BatchWriteItemInput) (*dynamodb.BatchWriteItemOutput, error) +// ---- v2 mock ---- - dynamodbiface.DynamoDBAPI +type mockDynamodb struct { + putItem func(input *dynamodb.PutItemInput) *dynamodb.PutItemOutput + transactWriteItem func(input *dynamodb.TransactWriteItemsInput) (*dynamodb.TransactWriteItemsOutput, error) + query func(input *dynamodb.QueryInput) (*dynamodb.QueryOutput, error) } -func (m *mockDynamodb) PutItemWithContext(_ context.Context, input *dynamodb.PutItemInput, _ ...request.Option) (*dynamodb.PutItemOutput, error) { +// Implement the minimal methods our code calls (v2 signatures). +func (m *mockDynamodb) PutItem(_ context.Context, input *dynamodb.PutItemInput, _ ...func(*dynamodb.Options)) (*dynamodb.PutItemOutput, error) { + if m.putItem == nil { + return &dynamodb.PutItemOutput{}, nil + } return m.putItem(input), nil } -func (m *mockDynamodb) BatchWriteItemWithContext(ctx context.Context, input *dynamodb.BatchWriteItemInput, opts ...request.Option) (*dynamodb.BatchWriteItemOutput, error) { - return m.batchWriteItem(input) +func (m *mockDynamodb) DeleteItem(_ context.Context, _ *dynamodb.DeleteItemInput, _ ...func(*dynamodb.Options)) (*dynamodb.DeleteItemOutput, error) { + // Not used in these tests; return empty success. 
+ return &dynamodb.DeleteItemOutput{}, nil +} + +func (m *mockDynamodb) TransactWriteItems(_ context.Context, input *dynamodb.TransactWriteItemsInput, _ ...func(*dynamodb.Options)) (*dynamodb.TransactWriteItemsOutput, error) { + if m.transactWriteItem == nil { + return &dynamodb.TransactWriteItemsOutput{}, nil + } + return m.transactWriteItem(input) +} + +func (m *mockDynamodb) Query(_ context.Context, input *dynamodb.QueryInput, _ ...func(*dynamodb.Options)) (*dynamodb.QueryOutput, error) { + if m.query == nil { + return &dynamodb.QueryOutput{}, nil + } + return m.query(input) } func newDynamodbClientMock(tableName string, mock *mockDynamodb, ttl time.Duration) *dynamodbKV { - ddbKV := &dynamodbKV{ - ddbClient: mock, + return &dynamodbKV{ + ddbClient: mock, // satisfies our dynamodbAPI interface logger: TestLogger{}, tableName: aws.String(tableName), ttlValue: ttl, } - - return ddbKV } diff --git a/pkg/ring/kv/dynamodb/metrics.go b/pkg/ring/kv/dynamodb/metrics.go index fc5e35a9e9..1d0f051da0 100644 --- a/pkg/ring/kv/dynamodb/metrics.go +++ b/pkg/ring/kv/dynamodb/metrics.go @@ -17,9 +17,10 @@ type dynamodbInstrumentation struct { } type dynamodbMetrics struct { - dynamodbRequestDuration *instrument.HistogramCollector - dynamodbUsageMetrics *prometheus.CounterVec - dynamodbCasAttempts prometheus.Counter + dynamodbRequestDuration *instrument.HistogramCollector + dynamodbUsageMetrics *prometheus.CounterVec + dynamodbCasAttempts prometheus.Counter + dynamodbConditionalCheckFailures prometheus.Counter } func newDynamoDbMetrics(registerer prometheus.Registerer) *dynamodbMetrics { @@ -39,10 +40,16 @@ func newDynamoDbMetrics(registerer prometheus.Registerer) *dynamodbMetrics { Help: "DynamoDB KV Store Attempted CAS operations", }) + dynamodbConditionalCheckFailures := promauto.With(registerer).NewCounter(prometheus.CounterOpts{ + Name: "dynamodb_kv_conditional_check_failed_total", + Help: "Total number of DynamoDB conditional check failures", + }) + dynamodbMetrics := dynamodbMetrics{ - dynamodbRequestDuration: dynamodbRequestDurationCollector, - dynamodbUsageMetrics: dynamodbUsageMetrics, - dynamodbCasAttempts: dynamodbCasAttempts, + dynamodbRequestDuration: dynamodbRequestDurationCollector, + dynamodbUsageMetrics: dynamodbUsageMetrics, + dynamodbCasAttempts: dynamodbCasAttempts, + dynamodbConditionalCheckFailures: dynamodbConditionalCheckFailures, } return &dynamodbMetrics } @@ -59,8 +66,8 @@ func (d dynamodbInstrumentation) List(ctx context.Context, key dynamodbKey) ([]s return resp, totalCapacity, err } -func (d dynamodbInstrumentation) Query(ctx context.Context, key dynamodbKey, isPrefix bool) (map[string][]byte, float64, error) { - var resp map[string][]byte +func (d dynamodbInstrumentation) Query(ctx context.Context, key dynamodbKey, isPrefix bool) (map[string]dynamodbItem, float64, error) { + var resp map[string]dynamodbItem var totalCapacity float64 err := instrument.CollectedRequest(ctx, "Query", d.ddbMetrics.dynamodbRequestDuration, errorCode, func(ctx context.Context) error { var err error @@ -87,12 +94,19 @@ func (d dynamodbInstrumentation) Put(ctx context.Context, key dynamodbKey, data }) } -func (d dynamodbInstrumentation) Batch(ctx context.Context, put map[dynamodbKey][]byte, delete []dynamodbKey) error { - return instrument.CollectedRequest(ctx, "Batch", d.ddbMetrics.dynamodbRequestDuration, errorCode, func(ctx context.Context) error { - totalCapacity, err := d.kv.Batch(ctx, put, delete) +func (d dynamodbInstrumentation) Batch(ctx context.Context, put map[dynamodbKey]dynamodbItem, 
delete []dynamodbKey) (bool, error) { + retry := false + err := instrument.CollectedRequest(ctx, "Batch", d.ddbMetrics.dynamodbRequestDuration, errorCode, func(ctx context.Context) error { + var err error + totalCapacity, shouldRetry, err := d.kv.Batch(ctx, put, delete) + retry = shouldRetry + if retry { + d.ddbMetrics.dynamodbConditionalCheckFailures.Inc() + } d.ddbMetrics.dynamodbUsageMetrics.WithLabelValues("Batch").Add(totalCapacity) return err }) + return retry, err } // errorCode converts an error into an error code string. diff --git a/pkg/ring/kv/etcd/etcd.go b/pkg/ring/kv/etcd/etcd.go index ca7dcf050a..1152bff5f7 100644 --- a/pkg/ring/kv/etcd/etcd.go +++ b/pkg/ring/kv/etcd/etcd.go @@ -122,7 +122,7 @@ func New(cfg Config, codec codec.Codec, logger log.Logger) (*Client, error) { } // CAS implements kv.Client. -func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error { +func (c *Client) CAS(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error { var revision int64 var lastErr error @@ -137,7 +137,7 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou continue } - var intermediate interface{} + var intermediate any if len(resp.Kvs) > 0 { intermediate, err = c.codec.Decode(resp.Kvs[0].Value) if err != nil { @@ -195,7 +195,7 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou } // WatchKey implements kv.Client. -func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) bool) { +func (c *Client) WatchKey(ctx context.Context, key string, f func(any) bool) { backoff := backoff.New(ctx, backoff.Config{ MinBackoff: 1 * time.Second, MaxBackoff: 1 * time.Minute, @@ -236,7 +236,7 @@ outer: } // WatchPrefix implements kv.Client. -func (c *Client) WatchPrefix(ctx context.Context, key string, f func(string, interface{}) bool) { +func (c *Client) WatchPrefix(ctx context.Context, key string, f func(string, any) bool) { backoff := backoff.New(ctx, backoff.Config{ MinBackoff: 1 * time.Second, MaxBackoff: 1 * time.Minute, @@ -298,7 +298,7 @@ func (c *Client) List(ctx context.Context, prefix string) ([]string, error) { } // Get implements kv.Client. -func (c *Client) Get(ctx context.Context, key string) (interface{}, error) { +func (c *Client) Get(ctx context.Context, key string) (any, error) { opsCtx, cancel := c.opsContext(ctx) defer cancel() diff --git a/pkg/ring/kv/kv_test.go b/pkg/ring/kv/kv_test.go index 37a51ae0da..e5476e14ee 100644 --- a/pkg/ring/kv/kv_test.go +++ b/pkg/ring/kv/kv_test.go @@ -52,14 +52,14 @@ var ( func TestCAS(t *testing.T) { withFixtures(t, func(t *testing.T, client Client) { // Blindly set key to "0". - err := client.CAS(ctx, key, func(in interface{}) (interface{}, bool, error) { + err := client.CAS(ctx, key, func(in any) (any, bool, error) { return "0", true, nil }) require.NoError(t, err) // Swap key to i+1 iff its i. - for i := 0; i < 10; i++ { - err = client.CAS(ctx, key, func(in interface{}) (interface{}, bool, error) { + for i := range 10 { + err = client.CAS(ctx, key, func(in any) (any, bool, error) { require.EqualValues(t, strconv.Itoa(i), in) return strconv.Itoa(i + 1), true, nil }) @@ -78,13 +78,13 @@ func TestCAS(t *testing.T) { func TestNilCAS(t *testing.T) { withFixtures(t, func(t *testing.T, client Client) { // Blindly set key to "0". 
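
// The loop rewrites below in kv_test.go use Go 1.22's "range over int",
// which is exactly equivalent to the classic counter loop. A tiny runnable
// illustration:
package main

import "fmt"

func main() {
	for i := range 3 { // same as: for i := 0; i < 3; i++
		fmt.Println(i)
	}
}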
-		err := client.CAS(ctx, key, func(in interface{}) (interface{}, bool, error) {
+		err := client.CAS(ctx, key, func(in any) (any, bool, error) {
 			return "0", true, nil
 		})
 		require.NoError(t, err)
 
 		// Ensure key is "0" and don't set it.
-		err = client.CAS(ctx, key, func(in interface{}) (interface{}, bool, error) {
+		err = client.CAS(ctx, key, func(in any) (any, bool, error) {
 			require.EqualValues(t, "0", in)
 			return nil, false, nil
 		})
@@ -113,7 +113,7 @@ func TestWatchKey(t *testing.T) {
 		// Start watching before we even start generating values.
 		// Values will be buffered in the channel.
 		t.Log("Watching in background", "key", key)
-		client.WatchKey(ctx, key, func(value interface{}) bool {
+		client.WatchKey(ctx, key, func(value any) bool {
 			observedValuesCh <- value.(string)
 			return true
 		})
@@ -121,11 +121,11 @@ func TestWatchKey(t *testing.T) {
 
 	// update value for the key
 	go func() {
-		for i := 0; i < max; i++ {
+		for i := range max {
 			// Start with sleeping, so that watching client see empty KV store at the beginning.
 			time.Sleep(sleep)
 
-			err := client.CAS(ctx, key, func(in interface{}) (out interface{}, retry bool, err error) {
+			err := client.CAS(ctx, key, func(in any) (out any, retry bool, err error) {
 				return fmt.Sprintf("%d", i), true, nil
 			})
@@ -193,7 +193,7 @@ func TestWatchPrefix(t *testing.T) {
 		defer wg.Done()
 
 		// start watching before we even start generating values. values will be buffered
-		client.WatchPrefix(ctx, prefix, func(key string, val interface{}) bool {
+		client.WatchPrefix(ctx, prefix, func(key string, val any) bool {
 			observedKeysCh <- key
 			return true
 		})
@@ -208,7 +208,7 @@ func TestWatchPrefix(t *testing.T) {
 			time.Sleep(sleep)
 
 			key := fmt.Sprintf("%s%d", p, i)
-			err := client.CAS(ctx, key, func(in interface{}) (out interface{}, retry bool, err error) {
+			err := client.CAS(ctx, key, func(in any) (out any, retry bool, err error) {
 				return key, true, nil
 			})
@@ -247,7 +247,7 @@ func TestWatchPrefix(t *testing.T) {
 	wg.Wait()
 
 	// verify that each key was reported once, and keys outside prefix were not reported
-	for i := 0; i < max; i++ {
+	for i := range max {
 		key := fmt.Sprintf("%s%d", prefix, i)
 
 		if observedKeys[key] != 1 {
@@ -268,7 +268,7 @@ func TestList(t *testing.T) {
 
 	withFixtures(t, func(t *testing.T, client Client) {
 		for _, key := range keysToCreate {
-			err := client.CAS(context.Background(), key, func(in interface{}) (out interface{}, retry bool, err error) {
+			err := client.CAS(context.Background(), key, func(in any) (out any, retry bool, err error) {
 				return key, false, nil
 			})
 			require.NoError(t, err)
diff --git a/pkg/ring/kv/memberlist/broadcast.go b/pkg/ring/kv/memberlist/broadcast.go
index 6657b73a51..d567c2e5ed 100644
--- a/pkg/ring/kv/memberlist/broadcast.go
+++ b/pkg/ring/kv/memberlist/broadcast.go
@@ -2,6 +2,7 @@ package memberlist
 
 import (
 	"fmt"
+	"slices"
 
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
@@ -28,13 +29,7 @@ func (r ringBroadcast) Invalidates(old memberlist.Broadcast) bool {
 	// and this broadcast has resulted in a newer ring update, we can invalidate the old value
 
 	for _, oldName := range oldb.content {
-		found := false
-		for _, newName := range r.content {
-			if oldName == newName {
-				found = true
-				break
-			}
-		}
+		found := slices.Contains(r.content, oldName)
 
 		if !found {
 			return false
diff --git a/pkg/ring/kv/memberlist/kv_init_service.go b/pkg/ring/kv/memberlist/kv_init_service.go
index c3350e5845..7e0b1acb04 100644
--- a/pkg/ring/kv/memberlist/kv_init_service.go
+++ b/pkg/ring/kv/memberlist/kv_init_service.go
@@ -224,7 +224,7 @@ func viewKey(w http.ResponseWriter, store map[string]valueDesc, key string, form
 	formatValue(w, store[key].value, format)
 }
 
-func formatValue(w http.ResponseWriter, val interface{}, format string) {
+func formatValue(w http.ResponseWriter, val any, format string) {
 
 	w.WriteHeader(200)
 	w.Header().Add("content-type", "text/plain")
 
diff --git a/pkg/ring/kv/memberlist/memberlist_client.go b/pkg/ring/kv/memberlist/memberlist_client.go
index 69f3bfd5ba..206157c284 100644
--- a/pkg/ring/kv/memberlist/memberlist_client.go
+++ b/pkg/ring/kv/memberlist/memberlist_client.go
@@ -60,7 +60,7 @@ func (c *Client) List(ctx context.Context, prefix string) ([]string, error) {
 }
 
 // Get is part of kv.Client interface.
-func (c *Client) Get(ctx context.Context, key string) (interface{}, error) {
+func (c *Client) Get(ctx context.Context, key string) (any, error) {
 	err := c.awaitKVRunningOrStopping(ctx)
 	if err != nil {
 		return nil, err
@@ -75,7 +75,7 @@ func (c *Client) Delete(ctx context.Context, key string) error {
 }
 
 // CAS is part of kv.Client interface
-func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error {
+func (c *Client) CAS(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error {
 	err := c.awaitKVRunningOrStopping(ctx)
 	if err != nil {
 		return err
@@ -85,7 +85,7 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou
 }
 
 // WatchKey is part of kv.Client interface.
-func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) bool) {
+func (c *Client) WatchKey(ctx context.Context, key string, f func(any) bool) {
 	err := c.awaitKVRunningOrStopping(ctx)
 	if err != nil {
 		return
@@ -96,7 +96,7 @@ func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) b
 
 // WatchPrefix calls f whenever any value stored under prefix changes.
 // Part of kv.Client interface.
-func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) {
+func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, any) bool) {
 	err := c.awaitKVRunningOrStopping(ctx)
 	if err != nil {
 		return
@@ -658,13 +658,13 @@ func (m *KV) List(prefix string) []string {
 
 // Get returns current value associated with given key.
 // No communication with other nodes in the cluster is done here.
-func (m *KV) Get(key string, codec codec.Codec) (interface{}, error) {
+func (m *KV) Get(key string, codec codec.Codec) (any, error) {
 	val, _, err := m.get(key, codec)
 	return val, err
 }
 
 // Returns current value with removed tombstones.
-func (m *KV) get(key string, codec codec.Codec) (out interface{}, version uint, err error) {
+func (m *KV) get(key string, codec codec.Codec) (out any, version uint, err error) {
 	m.storeMu.Lock()
 	v := m.store[key].Clone()
 	m.storeMu.Unlock()
@@ -682,7 +682,7 @@ func (m *KV) get(key string, codec codec.Codec) (out interface{}, version uint,
 // latest value. Notifications that arrive while 'f' is running are coalesced into one subsequent 'f' call.
 //
 // Watching ends when 'f' returns false, context is done, or this client is shut down.
-func (m *KV) WatchKey(ctx context.Context, key string, codec codec.Codec, f func(interface{}) bool) {
+func (m *KV) WatchKey(ctx context.Context, key string, codec codec.Codec, f func(any) bool) {
 	// keep one extra notification, to avoid missing notification if we're busy running the function
 	w := make(chan string, 1)
 
@@ -729,7 +729,7 @@ func (m *KV) WatchKey(ctx context.Context, key string, codec codec.Codec, f func
 // some notifications may be lost.
 //
 // Watching ends when 'f' returns false, context is done, or this client is shut down.
-func (m *KV) WatchPrefix(ctx context.Context, prefix string, codec codec.Codec, f func(string, interface{}) bool) {
+func (m *KV) WatchPrefix(ctx context.Context, prefix string, codec codec.Codec, f func(string, any) bool) {
 	// we use bigger buffer here, since keys are interesting and we don't want to lose them.
 	w := make(chan string, 16)
 
@@ -828,7 +828,7 @@ func (m *KV) notifyWatchers(key string) {
 // KV store, and change is broadcast to cluster peers. Merge function is called with CAS flag on, so that it can
 // detect removals. If Merge doesn't result in any change (returns nil), then operation fails and is retried again.
 // After too many failed retries, this method returns error.
-func (m *KV) CAS(ctx context.Context, key string, codec codec.Codec, f func(in interface{}) (out interface{}, retry bool, err error)) error {
+func (m *KV) CAS(ctx context.Context, key string, codec codec.Codec, f func(in any) (out any, retry bool, err error)) error {
 	var lastError error
 
 outer:
@@ -885,7 +885,7 @@ outer:
 
 // returns change, error (or nil, if CAS succeeded), and whether to retry or not.
 // returns errNoChangeDetected if merge failed to detect change in f's output.
-func (m *KV) trySingleCas(key string, codec codec.Codec, f func(in interface{}) (out interface{}, retry bool, err error)) (Mergeable, uint, bool, error) {
+func (m *KV) trySingleCas(key string, codec codec.Codec, f func(in any) (out any, retry bool, err error)) (Mergeable, uint, bool, error) {
 	val, ver, err := m.get(key, codec)
 	if err != nil {
 		return nil, 0, false, fmt.Errorf("failed to get value: %v", err)
diff --git a/pkg/ring/kv/memberlist/memberlist_client_test.go b/pkg/ring/kv/memberlist/memberlist_client_test.go
index fbca1924c4..ed093670d1 100644
--- a/pkg/ring/kv/memberlist/memberlist_client_test.go
+++ b/pkg/ring/kv/memberlist/memberlist_client_test.go
@@ -6,6 +6,7 @@ import (
 	"encoding/gob"
 	"errors"
 	"fmt"
+	"maps"
 	"math"
 	"math/rand"
 	"net"
@@ -111,7 +112,7 @@ func (m member) clone() member {
 	return out
 }
 
-func (d *data) Clone() interface{} {
+func (d *data) Clone() any {
 	out := &data{
 		Members: make(map[string]member, len(d.Members)),
 	}
@@ -137,22 +138,22 @@ func (d dataCodec) CodecID() string {
 	return "testDataCodec"
 }
 
-func (d dataCodec) Decode(b []byte) (interface{}, error) {
+func (d dataCodec) Decode(b []byte) (any, error) {
 	dec := gob.NewDecoder(bytes.NewBuffer(b))
 	out := &data{}
 	err := dec.Decode(out)
 	return out, err
 }
 
-func (d dataCodec) DecodeMultiKey(map[string][]byte) (interface{}, error) {
+func (d dataCodec) DecodeMultiKey(map[string][]byte) (any, error) {
 	return nil, errors.New("dataCodec does not support DecodeMultiKey")
 }
 
-func (d dataCodec) EncodeMultiKey(interface{}) (map[string][]byte, error) {
+func (d dataCodec) EncodeMultiKey(any) (map[string][]byte, error) {
 	return nil, errors.New("dataCodec does not support EncodeMultiKey")
 }
 
-func (d dataCodec) Encode(val interface{}) ([]byte, error) {
+func (d dataCodec) Encode(val any) ([]byte, error) {
 	buf := bytes.Buffer{}
 	enc := gob.NewEncoder(&buf)
 	err := enc.Encode(val)
@@ -196,7 +197,7 @@ func updateFn(name string) func(*data) (*data, bool, error) {
 	}
 }
 
-func get(t *testing.T, kv *Client, key string) interface{} {
+func get(t *testing.T, kv *Client, key string) any {
 	val, err := kv.Get(context.Background(), key)
 	if err != nil {
 		t.Fatalf("Failed to get value for key %s: %v", key, err)
@@ -227,7 +228,7 @@ func cas(t *testing.T, kv *Client, key string, updateFn func(*data) (*data, bool
 func casWithErr(ctx context.Context, t *testing.T, kv *Client, key string, updateFn func(*data) (*data, bool, error)) error {
 	t.Helper()
-	fn := func(in interface{}) (out interface{}, retry bool, err error) {
+	fn := func(in any) (out any, retry bool, err error) {
 		var r *data
 		if in != nil {
 			r = in.(*data)
@@ -469,7 +470,7 @@ func TestMultipleCAS(t *testing.T) {
 	const members = 10
 	const namePattern = "Member-%d"
 
-	for i := 0; i < members; i++ {
+	for i := range members {
 		wg.Add(1)
 		go func(name string) {
 			defer wg.Done()
@@ -487,7 +488,7 @@ func TestMultipleCAS(t *testing.T) {
 	r := getData(t, kv, "test")
 	require.True(t, r != nil, "nil ring")
 
-	for i := 0; i < members; i++ {
+	for i := range members {
 		n := fmt.Sprintf(namePattern, i)
 
 		if r.Members[n].State != ACTIVE {
@@ -498,7 +499,7 @@ func TestMultipleCAS(t *testing.T) {
 	// Make all members leave
 	start = make(chan struct{})
 
-	for i := 0; i < members; i++ {
+	for i := range members {
 		wg.Add(1)
 		go func(name string) {
 			defer wg.Done()
@@ -518,7 +519,7 @@ func TestMultipleCAS(t *testing.T) {
 	r = getData(t, kv, "test")
 	require.True(t, r != nil, "nil ring")
 
-	for i := 0; i < members; i++ {
+	for i := range members {
 		n := fmt.Sprintf(namePattern, i)
 
 		if r.Members[n].State != LEFT {
@@ -540,7 +541,7 @@ func TestMultipleClients(t *testing.T) {
 
 	port := 0
 
-	for i := 0; i < members; i++ {
+	for i := range members {
 		id := fmt.Sprintf("Member-%d", i)
 
 		var cfg KVConfig
 		flagext.DefaultValues(&cfg)
@@ -581,7 +582,7 @@ func TestMultipleClients(t *testing.T) {
 	firstKv := clients[0]
 	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
 	updates := 0
-	firstKv.WatchKey(ctx, key, func(in interface{}) bool {
+	firstKv.WatchKey(ctx, key, func(in any) bool {
 		updates++
 
 		r := in.(*data)
@@ -610,7 +611,7 @@ func TestMultipleClients(t *testing.T) {
 
 	// And same tokens.
 	allTokens := []uint32(nil)
 
-	for i := 0; i < members; i++ {
+	for i := range members {
 		kv := clients[i]
 
 		r := getData(t, kv, key)
@@ -743,7 +744,7 @@ func TestJoinMembersWithRetryBackoff(t *testing.T) {
 	firstKv := clients[0]
 	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
 	observedMembers := 0
-	firstKv.WatchKey(ctx, key, func(in interface{}) bool {
+	firstKv.WatchKey(ctx, key, func(in any) bool {
 		r := in.(*data)
 		observedMembers = len(r.Members)
@@ -823,7 +824,7 @@ func TestMemberlistJoinOnStarting(t *testing.T) {
 	mkv2 := NewKV(cfg2, log.NewNopLogger(), &dnsProviderMock{}, prometheus.NewPedanticRegistry())
 	require.NoError(t, mkv2.starting(context.Background()))
 
-	membersFunc := func() interface{} {
+	membersFunc := func() any {
 		return mkv2.memberlist.NumMembers()
 	}
 
@@ -832,7 +833,7 @@ func TestMemberlistJoinOnStarting(t *testing.T) {
 func getFreePorts(count int) ([]int, error) {
 	var ports []int
-	for i := 0; i < count; i++ {
+	for range count {
 		addr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:0")
 		if err != nil {
 			return nil, err
@@ -945,11 +946,9 @@ func (dc distributedCounter) RemoveTombstones(limit time.Time) (_, _ int) {
 	return
 }
 
-func (dc distributedCounter) Clone() interface{} {
+func (dc distributedCounter) Clone() any {
 	out := make(distributedCounter, len(dc))
-	for k, v := range dc {
-		out[k] = v
-	}
+	maps.Copy(out, dc)
 	return out
 }
 
@@ -959,25 +958,25 @@ func (d distributedCounterCodec) CodecID() string {
 	return "distributedCounter"
 }
 
-func (d distributedCounterCodec) Decode(b []byte) (interface{}, error) {
+func (d distributedCounterCodec) Decode(b []byte) (any, error) {
 	dec := gob.NewDecoder(bytes.NewBuffer(b))
 	out := &distributedCounter{}
 	err := dec.Decode(out)
 	return *out, err
 }
 
-func (d distributedCounterCodec) Encode(val interface{}) ([]byte, error) {
+func (d distributedCounterCodec) Encode(val any) ([]byte, error) {
 	buf := bytes.Buffer{}
 	enc := gob.NewEncoder(&buf)
 	err := enc.Encode(val)
 	return buf.Bytes(), err
 }
 
-func (d distributedCounterCodec) DecodeMultiKey(map[string][]byte) (interface{}, error) {
+func (d distributedCounterCodec) DecodeMultiKey(map[string][]byte) (any, error) {
 	return nil, errors.New("distributedCounterCodec does not support DecodeMultiKey")
 }
 
-func (d distributedCounterCodec) EncodeMultiKey(interface{}) (map[string][]byte, error) {
+func (d distributedCounterCodec) EncodeMultiKey(any) (map[string][]byte, error) {
 	return nil, errors.New("distributedCounterCodec does not support EncodeMultiKey")
 }
 
@@ -1006,7 +1005,7 @@ func TestMultipleCodecs(t *testing.T) {
 	kv2, err := NewClient(mkv1, distributedCounterCodec{})
 	require.NoError(t, err)
 
-	err = kv1.CAS(context.Background(), "data", func(in interface{}) (out interface{}, retry bool, err error) {
+	err = kv1.CAS(context.Background(), "data", func(in any) (out any, retry bool, err error) {
 		var d *data
 		if in != nil {
 			d = in.(*data)
@@ -1025,7 +1024,7 @@ func TestMultipleCodecs(t *testing.T) {
 	})
 	require.NoError(t, err)
 
-	err = kv2.CAS(context.Background(), "counter", func(in interface{}) (out interface{}, retry bool, err error) {
+	err = kv2.CAS(context.Background(), "counter", func(in any) (out any, retry bool, err error) {
 		var dc distributedCounter
 		if in != nil {
 			dc = in.(distributedCounter)
@@ -1099,7 +1098,7 @@ func TestRejoin(t *testing.T) {
 	require.NoError(t, services.StartAndAwaitRunning(context.Background(), mkv2))
 	defer services.StopAndAwaitTerminated(context.Background(), mkv2) //nolint:errcheck
 
-	membersFunc := func() interface{} {
+	membersFunc := func() any {
 		return mkv2.memberlist.NumMembers()
 	}
 
@@ -1156,7 +1155,7 @@ func TestNotifyMsgResendsOnlyChanges(t *testing.T) {
 	now := time.Now()
 
-	require.NoError(t, client.CAS(context.Background(), key, func(in interface{}) (out interface{}, retry bool, err error) {
+	require.NoError(t, client.CAS(context.Background(), key, func(in any) (out any, retry bool, err error) {
 		d := getOrCreateData(in)
 		d.Members["a"] = member{Timestamp: now.Unix(), State: JOINING}
 		d.Members["b"] = member{Timestamp: now.Unix(), State: JOINING}
@@ -1299,7 +1298,7 @@ func decodeDataFromMarshalledKeyValuePair(t *testing.T, marshalledKVP []byte, ke
 	return d
 }
 
-func marshalKeyValuePair(t *testing.T, key string, codec codec.Codec, value interface{}) []byte {
+func marshalKeyValuePair(t *testing.T, key string, codec codec.Codec, value any) []byte {
 	data, err := codec.Encode(value)
 	require.NoError(t, err)
 
@@ -1309,7 +1308,7 @@ func marshalKeyValuePair(t *testing.T, key string, codec codec.Codec, value inte
 	return data
 }
 
-func getOrCreateData(in interface{}) *data {
+func getOrCreateData(in any) *data {
 	// Modify value that was passed as a parameter.
 	// Client takes care of concurrent modifications.
 	r, ok := in.(*data)
@@ -1320,7 +1319,7 @@ func getOrCreateData(in interface{}) *data {
 }
 
 // poll repeatedly evaluates condition until we either timeout, or it succeeds.
-func poll(t testing.TB, d time.Duration, want interface{}, have func() interface{}) {
+func poll(t testing.TB, d time.Duration, want any, have func() any) {
 	t.Helper()
 
 	deadline := time.Now().Add(d)
@@ -1340,7 +1339,7 @@ func poll(t testing.TB, d time.Duration, want interface{}, have func() interface
 type testLogger struct {
 }
 
-func (l testLogger) Log(keyvals ...interface{}) error {
+func (l testLogger) Log(keyvals ...any) error {
 	return nil
 }
 
diff --git a/pkg/ring/kv/memberlist/memberlist_logger.go b/pkg/ring/kv/memberlist/memberlist_logger.go
index 30a28d0685..4574216b98 100644
--- a/pkg/ring/kv/memberlist/memberlist_logger.go
+++ b/pkg/ring/kv/memberlist/memberlist_logger.go
@@ -30,7 +30,7 @@ func newMemberlistLoggerAdapter(logger log.Logger, logTimestamp bool) io.Writer
 func (a loggerAdapter) Write(p []byte) (int, error) {
 	result := subexps(p)
-	keyvals := []interface{}{}
+	keyvals := []any{}
 	var timestamp string
 	if date, ok := result["date"]; ok && date != "" {
 		timestamp = date
diff --git a/pkg/ring/kv/memberlist/metrics.go b/pkg/ring/kv/memberlist/metrics.go
index 4dfd23a11e..759140de88 100644
--- a/pkg/ring/kv/memberlist/metrics.go
+++ b/pkg/ring/kv/memberlist/metrics.go
@@ -3,9 +3,9 @@ package memberlist
 import (
 	"time"
 
-	armonmetrics "github.com/armon/go-metrics"
-	armonprometheus "github.com/armon/go-metrics/prometheus"
 	"github.com/go-kit/log/level"
+	armonmetrics "github.com/hashicorp/go-metrics"
+	armonprometheus "github.com/hashicorp/go-metrics/prometheus"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
 
diff --git a/pkg/ring/kv/metrics.go b/pkg/ring/kv/metrics.go
index 38ec3b59cd..30ed8ff4aa 100644
--- a/pkg/ring/kv/metrics.go
+++ b/pkg/ring/kv/metrics.go
@@ -71,8 +71,8 @@ func (m metrics) List(ctx context.Context, prefix string) ([]string, error) {
 	return result, err
 }
 
-func (m metrics) Get(ctx context.Context, key string) (interface{}, error) {
-	var result interface{}
+func (m metrics) Get(ctx context.Context, key string) (any, error) {
+	var result any
 	err := instrument.CollectedRequest(ctx, "GET", m.requestDuration, instrument.ErrorCode, func(ctx context.Context) error {
 		var err error
 		result, err = m.c.Get(ctx, key)
@@ -88,20 +88,20 @@ func (m metrics) Delete(ctx context.Context, key string) error {
 	return err
 }
 
-func (m metrics) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error {
+func (m metrics) CAS(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error {
 	return instrument.CollectedRequest(ctx, "CAS", m.requestDuration, getCasErrorCode, func(ctx context.Context) error {
 		return m.c.CAS(ctx, key, f)
 	})
 }
 
-func (m metrics) WatchKey(ctx context.Context, key string, f func(interface{}) bool) {
+func (m metrics) WatchKey(ctx context.Context, key string, f func(any) bool) {
 	_ = instrument.CollectedRequest(ctx, "WatchKey", m.requestDuration, instrument.ErrorCode, func(ctx context.Context) error {
 		m.c.WatchKey(ctx, key, f)
 		return nil
 	})
 }
 
-func (m metrics) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) {
+func (m metrics) WatchPrefix(ctx context.Context, prefix string, f func(string, any) bool) {
 	_ = instrument.CollectedRequest(ctx, "WatchPrefix", m.requestDuration, instrument.ErrorCode, func(ctx context.Context) error {
 		m.c.WatchPrefix(ctx, prefix, f)
 		return nil
diff --git a/pkg/ring/kv/mock.go b/pkg/ring/kv/mock.go
index cbe23106a8..f889be60d8 100644
--- a/pkg/ring/kv/mock.go
+++ b/pkg/ring/kv/mock.go
@@ -21,7 +21,7 @@ func (m mockClient) List(ctx context.Context, prefix string) ([]string, error) {
 	return []string{}, nil
 }
 
-func (m mockClient) Get(ctx context.Context, key string) (interface{}, error) {
+func (m mockClient) Get(ctx context.Context, key string) (any, error) {
 	return "", nil
 }
 
@@ -29,14 +29,14 @@ func (m mockClient) Delete(ctx context.Context, key string) error {
 	return nil
 }
 
-func (m mockClient) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error {
+func (m mockClient) CAS(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error {
 	return nil
 }
 
-func (m mockClient) WatchKey(ctx context.Context, key string, f func(interface{}) bool) {
+func (m mockClient) WatchKey(ctx context.Context, key string, f func(any) bool) {
 }
 
-func (m mockClient) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) {
+func (m mockClient) WatchPrefix(ctx context.Context, prefix string, f func(string, any) bool) {
 }
 
 func (m mockClient) LastUpdateTime(key string) time.Time {
diff --git a/pkg/ring/kv/multi.go b/pkg/ring/kv/multi.go
index e4ac994d76..98c2a04b65 100644
--- a/pkg/ring/kv/multi.go
+++ b/pkg/ring/kv/multi.go
@@ -290,7 +290,7 @@ func (m *MultiClient) List(ctx context.Context, prefix string) ([]string, error)
 }
 
 // Get is a part of kv.Client interface.
-func (m *MultiClient) Get(ctx context.Context, key string) (interface{}, error) {
+func (m *MultiClient) Get(ctx context.Context, key string) (any, error) {
 	_, kv := m.getPrimaryClient()
 	return kv.client.Get(ctx, key)
 }
@@ -302,11 +302,11 @@ func (m *MultiClient) Delete(ctx context.Context, key string) error {
 }
 
 // CAS is a part of kv.Client interface.
-func (m *MultiClient) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error {
+func (m *MultiClient) CAS(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error {
 	_, kv := m.getPrimaryClient()
 
-	updatedValue := interface{}(nil)
-	err := kv.client.CAS(ctx, key, func(in interface{}) (interface{}, bool, error) {
+	updatedValue := any(nil)
+	err := kv.client.CAS(ctx, key, func(in any) (any, bool, error) {
 		out, retry, err := f(in)
 		updatedValue = out
 		return out, retry, err
@@ -320,7 +320,7 @@ func (m *MultiClient) CAS(ctx context.Context, key string, f func(in interface{}
 }
 
 // WatchKey is a part of kv.Client interface.
-func (m *MultiClient) WatchKey(ctx context.Context, key string, f func(interface{}) bool) {
+func (m *MultiClient) WatchKey(ctx context.Context, key string, f func(any) bool) {
 	_ = m.runWithPrimaryClient(ctx, func(newCtx context.Context, primary kvclient) error {
 		primary.client.WatchKey(newCtx, key, f)
 		return newCtx.Err()
@@ -328,7 +328,7 @@ func (m *MultiClient) WatchKey(ctx context.Context, key string, f func(interface
 }
 
 // WatchPrefix is a part of kv.Client interface.
-func (m *MultiClient) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) {
+func (m *MultiClient) WatchPrefix(ctx context.Context, prefix string, f func(string, any) bool) {
 	_ = m.runWithPrimaryClient(ctx, func(newCtx context.Context, primary kvclient) error {
 		primary.client.WatchPrefix(newCtx, prefix, f)
 		return newCtx.Err()
@@ -340,7 +340,7 @@ func (m *MultiClient) LastUpdateTime(key string) time.Time {
 	return kv.client.LastUpdateTime(key)
 }
 
-func (m *MultiClient) writeToSecondary(ctx context.Context, primary kvclient, key string, newValue interface{}) {
+func (m *MultiClient) writeToSecondary(ctx context.Context, primary kvclient, key string, newValue any) {
 	if m.mirrorTimeout > 0 {
 		var cfn context.CancelFunc
 		ctx, cfn = context.WithTimeout(ctx, m.mirrorTimeout)
@@ -354,7 +354,7 @@ func (m *MultiClient) writeToSecondary(ctx context.Context, primary kvclient, ke
 		}
 
 		m.mirrorWritesCounter.Inc()
-		err := kvc.client.CAS(ctx, key, func(in interface{}) (out interface{}, retry bool, err error) {
+		err := kvc.client.CAS(ctx, key, func(in any) (out any, retry bool, err error) {
 			// try once
 			return newValue, false, nil
 		})
diff --git a/pkg/ring/kv/prefix.go b/pkg/ring/kv/prefix.go
index aba9b7a092..d9406b4ff6 100644
--- a/pkg/ring/kv/prefix.go
+++ b/pkg/ring/kv/prefix.go
@@ -37,24 +37,24 @@ func (c *prefixedKVClient) List(ctx context.Context, prefix string) ([]string, e
 
 // CAS atomically modifies a value in a callback. If the value doesn't exist,
 // you'll get 'nil' as an argument to your callback.
-func (c *prefixedKVClient) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error {
+func (c *prefixedKVClient) CAS(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error {
 	return c.client.CAS(ctx, c.prefix+key, f)
 }
 
 // WatchKey watches a key.
-func (c *prefixedKVClient) WatchKey(ctx context.Context, key string, f func(interface{}) bool) {
+func (c *prefixedKVClient) WatchKey(ctx context.Context, key string, f func(any) bool) {
 	c.client.WatchKey(ctx, c.prefix+key, f)
 }
 
 // WatchPrefix watches a prefix. For a prefix client it appends the prefix argument to the clients prefix.
-func (c *prefixedKVClient) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) {
-	c.client.WatchPrefix(ctx, fmt.Sprintf("%s%s", c.prefix, prefix), func(k string, i interface{}) bool {
+func (c *prefixedKVClient) WatchPrefix(ctx context.Context, prefix string, f func(string, any) bool) {
+	c.client.WatchPrefix(ctx, fmt.Sprintf("%s%s", c.prefix, prefix), func(k string, i any) bool {
 		return f(strings.TrimPrefix(k, c.prefix), i)
 	})
 }
 
 // Get looks up a given object from its key.
-func (c *prefixedKVClient) Get(ctx context.Context, key string) (interface{}, error) {
+func (c *prefixedKVClient) Get(ctx context.Context, key string) (any, error) {
 	return c.client.Get(ctx, c.prefix+key)
 }
diff --git a/pkg/ring/lifecycler.go b/pkg/ring/lifecycler.go
index 1a6812a941..6038de2277 100644
--- a/pkg/ring/lifecycler.go
+++ b/pkg/ring/lifecycler.go
@@ -446,7 +446,7 @@ func (i *Lifecycler) ClaimTokensFor(ctx context.Context, ingesterID string) erro
 	fn := func() {
 		var tokens Tokens
 
-		claimTokens := func(in interface{}) (out interface{}, retry bool, err error) {
+		claimTokens := func(in any) (out any, retry bool, err error) {
 			ringDesc, ok := in.(*Desc)
 			if !ok || ringDesc == nil {
 				return nil, false, fmt.Errorf("cannot claim tokens in an empty ring")
@@ -722,7 +722,7 @@ func (i *Lifecycler) initRing(ctx context.Context) (bool, error) {
 		level.Info(i.logger).Log("msg", "not loading tokens from file, tokens file path is empty")
 	}
 
-	err = i.KVStore.CAS(ctx, i.RingKey, func(in interface{}) (out interface{}, retry bool, err error) {
+	err = i.KVStore.CAS(ctx, i.RingKey, func(in any) (out any, retry bool, err error) {
 		if in == nil {
 			ringDesc = NewDesc()
 		} else {
@@ -821,7 +821,7 @@ func (i *Lifecycler) RenewTokens(ratio float64, ctx context.Context) {
 	if ratio > 1 {
 		ratio = 1
 	}
-	err := i.KVStore.CAS(ctx, i.RingKey, func(in interface{}) (out interface{}, retry bool, err error) {
+	err := i.KVStore.CAS(ctx, i.RingKey, func(in any) (out any, retry bool, err error) {
 		if in == nil {
 			return in, false, nil
 		}
@@ -837,7 +837,7 @@ func (i *Lifecycler) RenewTokens(ratio float64, ctx context.Context) {
 		ringTokens, _ := ringDesc.TokensFor(i.ID)
 
 		// Removing random tokens
-		for i := 0; i < tokensToBeRenewed; i++ {
+		for range tokensToBeRenewed {
 			if len(ringTokens) == 0 {
 				break
 			}
@@ -869,7 +869,7 @@ func (i *Lifecycler) RenewTokens(ratio float64, ctx context.Context) {
 func (i *Lifecycler) verifyTokens(ctx context.Context) bool {
 	result := false
 
-	err := i.KVStore.CAS(ctx, i.RingKey, func(in interface{}) (out interface{}, retry bool, err error) {
+	err := i.KVStore.CAS(ctx, i.RingKey, func(in any) (out any, retry bool, err error) {
 		var ringDesc *Desc
 		if in == nil {
 			ringDesc = NewDesc()
@@ -920,7 +920,7 @@ func (i *Lifecycler) compareTokens(fromRing Tokens) bool {
 		return false
 	}
 
-	for i := 0; i < len(tokens); i++ {
+	for i := range tokens {
 		if tokens[i] != fromRing[i] {
 			return false
 		}
@@ -932,7 +932,7 @@ func (i *Lifecycler) compareTokens(fromRing Tokens) bool {
 func (i *Lifecycler) autoJoin(ctx context.Context, targetState InstanceState, alreadyInRing bool) error {
 	var ringDesc *Desc
 
-	err := i.KVStore.CAS(ctx, i.RingKey, func(in interface{}) (out interface{}, retry bool, err error) {
+	err := i.KVStore.CAS(ctx, i.RingKey, func(in any) (out any, retry bool, err error) {
 		if in == nil {
 			ringDesc = NewDesc()
 		} else {
@@ -987,7 +987,7 @@ func (i *Lifecycler) autoJoin(ctx context.Context, targetState InstanceState, al
 func (i *Lifecycler) updateConsul(ctx context.Context) error {
 	var ringDesc *Desc
 
-	err := i.KVStore.CAS(ctx, i.RingKey, func(in interface{}) (out interface{}, retry bool, err error) {
+	err := i.KVStore.CAS(ctx, i.RingKey, func(in any) (out any, retry bool, err error) {
 		if in == nil {
 			ringDesc = NewDesc()
 		} else {
@@ -1121,7 +1121,7 @@ func (i *Lifecycler) processShutdown(ctx context.Context) {
 func (i *Lifecycler) unregister(ctx context.Context) error {
 	level.Debug(i.logger).Log("msg", "unregistering instance from ring", "ring", i.RingName)
 
-	return i.KVStore.CAS(ctx, i.RingKey, func(in interface{}) (out interface{}, retry bool, err error) {
+	return i.KVStore.CAS(ctx, i.RingKey, func(in any) (out any, retry bool, err error) {
 		if in == nil {
 			return nil, false, fmt.Errorf("found empty ring when trying to unregister")
 		}
diff --git a/pkg/ring/lifecycler_test.go b/pkg/ring/lifecycler_test.go
index 6778e053eb..b7dc0afb3e 100644
--- a/pkg/ring/lifecycler_test.go
+++ b/pkg/ring/lifecycler_test.go
@@ -47,7 +47,7 @@ func testLifecyclerConfigWithAddr(ringConfig Config, id string, addr string) Lif
 	return l
 }
 
-func checkNormalised(d interface{}, id string) bool {
+func checkNormalised(d any, id string) bool {
 	desc, ok := d.(*Desc)
 	return ok &&
 		len(desc.Ingesters) == 1 &&
@@ -117,7 +117,7 @@ func TestLifecycler_RenewTokens(t *testing.T) {
 	require.Len(t, newTokens, 512)
 	require.IsIncreasing(t, newTokens)
 	diff := 0
-	for i := 0; i < len(originalTokens); i++ {
+	for i := range originalTokens {
 		if !slices.Contains(originalTokens, newTokens[i]) {
 			diff++
 		}
@@ -203,7 +203,7 @@ func TestLifecycler_HealthyInstancesCount(t *testing.T) {
 	defer services.StopAndAwaitTerminated(ctx, lifecycler1) // nolint:errcheck
 
 	// Assert the first ingester joined the ring
-	test.Poll(t, 1000*time.Millisecond, true, func() interface{} {
+	test.Poll(t, 1000*time.Millisecond, true, func() any {
 		return lifecycler1.HealthyInstancesCount() == 1
 	})
 
@@ -220,12 +220,12 @@ func TestLifecycler_HealthyInstancesCount(t *testing.T) {
 	defer services.StopAndAwaitTerminated(ctx, lifecycler2) // nolint:errcheck
 
 	// Assert the second ingester joined the ring
-	test.Poll(t, 1000*time.Millisecond, true, func() interface{} {
+	test.Poll(t, 1000*time.Millisecond, true, func() any {
 		return lifecycler2.HealthyInstancesCount() == 2
 	})
 
 	// Assert the first ingester count is updated
-	test.Poll(t, 1000*time.Millisecond, true, func() interface{} {
+	test.Poll(t, 1000*time.Millisecond, true, func() any {
 		return lifecycler1.HealthyInstancesCount() == 2
 	})
 }
@@ -265,7 +265,7 @@ func TestLifecycler_ZonesCount(t *testing.T) {
 		defer services.StopAndAwaitTerminated(ctx, lifecycler) // nolint:errcheck
 
 		// Wait until joined.
-		test.Poll(t, time.Second, idx+1, func() interface{} {
+		test.Poll(t, time.Second, idx+1, func() any {
 			return lifecycler.HealthyInstancesCount()
 		})
 
@@ -288,7 +288,7 @@ func TestLifecycler_NilFlushTransferer(t *testing.T) {
 	require.NoError(t, services.StartAndAwaitRunning(context.Background(), lifecycler))
 
 	// Ensure the lifecycler joined the ring
-	test.Poll(t, time.Second, 1, func() interface{} {
+	test.Poll(t, time.Second, 1, func() any {
 		return lifecycler.HealthyInstancesCount()
 	})
 
@@ -322,11 +322,11 @@ func TestLifecycler_TwoRingsWithDifferentKeysOnTheSameKVStore(t *testing.T) {
 
 	// Ensure each lifecycler reports 1 healthy instance, because they're
 	// in a different ring
-	test.Poll(t, time.Second, 1, func() interface{} {
+	test.Poll(t, time.Second, 1, func() any {
 		return lifecycler1.HealthyInstancesCount()
 	})
 
-	test.Poll(t, time.Second, 1, func() interface{} {
+	test.Poll(t, time.Second, 1, func() any {
 		return lifecycler2.HealthyInstancesCount()
 	})
 }
@@ -358,7 +358,7 @@ func TestLifecycler_ShouldHandleInstanceAbruptlyRestarted(t *testing.T) {
 	require.NoError(t, services.StartAndAwaitRunning(context.Background(), l1))
 
 	// Check this ingester joined, is active, and has one token.
-	test.Poll(t, 1000*time.Millisecond, true, func() interface{} {
+	test.Poll(t, 1000*time.Millisecond, true, func() any {
 		d, err := r.KVClient.Get(context.Background(), ringKey)
 		require.NoError(t, err)
 		return checkNormalised(d, "ing1")
@@ -377,7 +377,7 @@ func TestLifecycler_ShouldHandleInstanceAbruptlyRestarted(t *testing.T) {
 	require.NoError(t, services.StartAndAwaitRunning(context.Background(), l2))
 
 	// Check the new ingester picked up the same tokens and registered timestamp.
-	test.Poll(t, 1000*time.Millisecond, true, func() interface{} {
+	test.Poll(t, 1000*time.Millisecond, true, func() any {
 		d, err := r.KVClient.Get(context.Background(), ringKey)
 		require.NoError(t, err)
 
@@ -389,11 +389,11 @@ func TestLifecycler_ShouldHandleInstanceAbruptlyRestarted(t *testing.T) {
 
 type MockClient struct {
 	ListFunc        func(ctx context.Context, prefix string) ([]string, error)
-	GetFunc         func(ctx context.Context, key string) (interface{}, error)
+	GetFunc         func(ctx context.Context, key string) (any, error)
 	DeleteFunc      func(ctx context.Context, key string) error
-	CASFunc         func(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error
-	WatchKeyFunc    func(ctx context.Context, key string, f func(interface{}) bool)
-	WatchPrefixFunc func(ctx context.Context, prefix string, f func(string, interface{}) bool)
+	CASFunc         func(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error
+	WatchKeyFunc    func(ctx context.Context, key string, f func(any) bool)
+	WatchPrefixFunc func(ctx context.Context, prefix string, f func(string, any) bool)
 }
 
 func (m *MockClient) List(ctx context.Context, prefix string) ([]string, error) {
@@ -404,7 +404,7 @@ func (m *MockClient) List(ctx context.Context, prefix string) ([]string, error)
 	return nil, nil
 }
 
-func (m *MockClient) Get(ctx context.Context, key string) (interface{}, error) {
+func (m *MockClient) Get(ctx context.Context, key string) (any, error) {
 	if m.GetFunc != nil {
 		return m.GetFunc(ctx, key)
 	}
@@ -420,7 +420,7 @@ func (m *MockClient) Delete(ctx context.Context, key string) error {
 	return nil
 }
 
-func (m *MockClient) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error {
+func (m *MockClient) CAS(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error {
 	if m.CASFunc != nil {
 		return m.CASFunc(ctx, key, f)
 	}
@@ -428,13 +428,13 @@ func (m *MockClient) CAS(ctx context.Context, key string, f func(in interface{})
 	return nil
 }
 
-func (m *MockClient) WatchKey(ctx context.Context, key string, f func(interface{}) bool) {
+func (m *MockClient) WatchKey(ctx context.Context, key string, f func(any) bool) {
 	if m.WatchKeyFunc != nil {
 		m.WatchKeyFunc(ctx, key, f)
 	}
}
 
-func (m *MockClient) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) {
+func (m *MockClient) WatchPrefix(ctx context.Context, prefix string, f func(string, any) bool) {
 	if m.WatchPrefixFunc != nil {
 		m.WatchPrefixFunc(ctx, prefix, f)
 	}
@@ -525,7 +525,7 @@ func TestCheckReady_MinReadyDuration(t *testing.T) {
 			assert.NoError(t, l.CheckReady(ctx))
 		} else {
 			// Poll the readiness check until ready and measure how much time it takes.
-			test.Poll(t, 3*time.Second, nil, func() interface{} {
+			test.Poll(t, 3*time.Second, nil, func() any {
 				return l.CheckReady(ctx)
 			})
 
@@ -604,7 +604,7 @@ func TestCheckReady_CheckRingHealth(t *testing.T) {
 	waitRingInstance(t, 3*time.Second, l2, func(instance InstanceDesc) error { return nil })
 
 	// Poll the readiness check until ready and measure how much time it takes.
-	test.Poll(t, 5*time.Second, nil, func() interface{} {
+	test.Poll(t, 5*time.Second, nil, func() any {
 		return l1.CheckReady(ctx)
 	})
 
@@ -635,7 +635,7 @@ func TestRestartIngester_DisabledHeartbeat_unregister_on_shutdown_false(t *testi
 	// poll function waits for a condition and returning actual state of the ingesters after the condition succeed.
 	poll := func(condition func(*Desc) bool) map[string]InstanceDesc {
 		var ingesters map[string]InstanceDesc
-		test.Poll(t, 5*time.Second, true, func() interface{} {
+		test.Poll(t, 5*time.Second, true, func() any {
 			d, err := r.KVClient.Get(context.Background(), ringKey)
 			require.NoError(t, err)
 
@@ -695,7 +695,7 @@ func TestRestartIngester_DisabledHeartbeat_unregister_on_shutdown_false(t *testi
 	require.NoError(t, services.StopAndAwaitTerminated(context.Background(), l2))
 
 	// Simulate ingester2 crash on startup and left the ring with JOINING state
-	err = r.KVClient.CAS(context.Background(), ringKey, func(in interface{}) (out interface{}, retry bool, err error) {
+	err = r.KVClient.CAS(context.Background(), ringKey, func(in any) (out any, retry bool, err error) {
 		desc, ok := in.(*Desc)
 		require.Equal(t, true, ok)
 		ingester2Desc := desc.Ingesters["ing2"]
@@ -709,7 +709,7 @@ func TestRestartIngester_DisabledHeartbeat_unregister_on_shutdown_false(t *testi
 	require.NoError(t, services.StopAndAwaitTerminated(context.Background(), l2))
 
 	// Simulate ingester2 crash on startup and left the ring with PENDING state
-	err = r.KVClient.CAS(context.Background(), ringKey, func(in interface{}) (out interface{}, retry bool, err error) {
+	err = r.KVClient.CAS(context.Background(), ringKey, func(in any) (out any, retry bool, err error) {
 		desc, ok := in.(*Desc)
 		require.Equal(t, true, ok)
 		ingester2Desc := desc.Ingesters["ing2"]
@@ -757,7 +757,7 @@ func TestRestartIngester_READONLY(t *testing.T) {
 	// poll function waits for a condition and returning actual state of the ingesters after the condition succeed.
 	poll := func(condition func(*Desc) bool) map[string]InstanceDesc {
 		var ingesters map[string]InstanceDesc
-		test.Poll(t, 5*time.Second, true, func() interface{} {
+		test.Poll(t, 5*time.Second, true, func() any {
 			d, err := r.KVClient.Get(context.Background(), ringKey)
 			require.NoError(t, err)
 
@@ -854,7 +854,7 @@ func TestTokenFileOnDisk(t *testing.T) {
 	// Check this ingester joined, is active, and has 512 token.
 	var expTokens []uint32
-	test.Poll(t, 1000*time.Millisecond, true, func() interface{} {
+	test.Poll(t, 1000*time.Millisecond, true, func() any {
 		d, err := r.KVClient.Get(context.Background(), ringKey)
 		require.NoError(t, err)
 
@@ -871,7 +871,7 @@ func TestTokenFileOnDisk(t *testing.T) {
 	// Change state from ACTIVE to READONLY
 	err = l1.ChangeState(context.Background(), READONLY)
 	require.NoError(t, err)
-	test.Poll(t, 1000*time.Millisecond, true, func() interface{} {
+	test.Poll(t, 1000*time.Millisecond, true, func() any {
 		d, err := r.KVClient.Get(context.Background(), ringKey)
 		require.NoError(t, err)
 
@@ -891,7 +891,7 @@ func TestTokenFileOnDisk(t *testing.T) {
 	// Check this ingester joined, is active, and has 512 token.
 	var actTokens []uint32
-	test.Poll(t, 1000*time.Millisecond, true, func() interface{} {
+	test.Poll(t, 1000*time.Millisecond, true, func() any {
 		d, err := r.KVClient.Get(context.Background(), ringKey)
 		require.NoError(t, err)
 		desc, ok := d.(*Desc)
@@ -907,7 +907,7 @@ func TestTokenFileOnDisk(t *testing.T) {
 	// Check for same tokens.
 	slices.Sort(expTokens)
 	slices.Sort(actTokens)
-	for i := 0; i < 512; i++ {
+	for range 512 {
 		require.Equal(t, expTokens, actTokens)
 	}
 }
@@ -937,7 +937,7 @@ func TestRegisteredAtOnBackToActive(t *testing.T) {
 	require.NoError(t, services.StartAndAwaitRunning(context.Background(), l1))
 
 	// Check this ingester joined, is active.
-	test.Poll(t, 1000*time.Millisecond, true, func() interface{} {
+	test.Poll(t, 1000*time.Millisecond, true, func() any {
 		d, err := r.KVClient.Get(context.Background(), ringKey)
 		require.NoError(t, err)
 
@@ -957,7 +957,7 @@ func TestRegisteredAtOnBackToActive(t *testing.T) {
 	// Change state from ACTIVE to READONLY
 	err = l1.ChangeState(context.Background(), READONLY)
 	require.NoError(t, err)
-	test.Poll(t, 1000*time.Millisecond, true, func() interface{} {
+	test.Poll(t, 1000*time.Millisecond, true, func() any {
 		d, err := r.KVClient.Get(context.Background(), ringKey)
 		require.NoError(t, err)
 
@@ -972,7 +972,7 @@ func TestRegisteredAtOnBackToActive(t *testing.T) {
 	// Change state from READONLY to ACTIVE
 	err = l1.ChangeState(context.Background(), ACTIVE)
 	require.NoError(t, err)
-	test.Poll(t, 1000*time.Millisecond, true, func() interface{} {
+	test.Poll(t, 1000*time.Millisecond, true, func() any {
 		d, err := r.KVClient.Get(context.Background(), ringKey)
 		require.NoError(t, err)
 
@@ -1021,7 +1021,7 @@ func TestTokenFileOnDisk_WithoutAutoJoinOnStartup(t *testing.T) {
 	// Check this ingester joined, is active, and has 512 token.
 	var expTokens []uint32
-	test.Poll(t, 1000*time.Millisecond, true, func() interface{} {
+	test.Poll(t, 1000*time.Millisecond, true, func() any {
 		d, err := r.KVClient.Get(context.Background(), ringKey)
 		require.NoError(t, err)
 
@@ -1038,7 +1038,7 @@ func TestTokenFileOnDisk_WithoutAutoJoinOnStartup(t *testing.T) {
 	// Change state from ACTIVE to READONLY
 	err = l1.ChangeState(context.Background(), READONLY)
 	require.NoError(t, err)
-	test.Poll(t, 1000*time.Millisecond, true, func() interface{} {
+	test.Poll(t, 1000*time.Millisecond, true, func() any {
 		d, err := r.KVClient.Get(context.Background(), ringKey)
 		require.NoError(t, err)
 
@@ -1057,7 +1057,7 @@ func TestTokenFileOnDisk_WithoutAutoJoinOnStartup(t *testing.T) {
 	defer services.StopAndAwaitTerminated(context.Background(), l2) //nolint:errcheck
 
 	// Check this ingester should not in the ring before calling Join
-	test.Poll(t, 1000*time.Millisecond, true, func() interface{} {
+	test.Poll(t, 1000*time.Millisecond, true, func() any {
 		d, err := r.KVClient.Get(context.Background(), ringKey)
 		require.NoError(t, err)
 		desc, ok := d.(*Desc)
@@ -1073,7 +1073,7 @@ func TestTokenFileOnDisk_WithoutAutoJoinOnStartup(t *testing.T) {
 	// Check this ingester joined, is in readonly state, and has 512 token.
 	var actTokens []uint32
-	test.Poll(t, 1000*time.Millisecond, true, func() interface{} {
+	test.Poll(t, 1000*time.Millisecond, true, func() any {
 		d, err := r.KVClient.Get(context.Background(), ringKey)
 		require.NoError(t, err)
 		desc, ok := d.(*Desc)
@@ -1089,7 +1089,7 @@ func TestTokenFileOnDisk_WithoutAutoJoinOnStartup(t *testing.T) {
 	// Check for same tokens.
 	slices.Sort(expTokens)
 	slices.Sort(actTokens)
-	for i := 0; i < 512; i++ {
+	for range 512 {
 		require.Equal(t, expTokens, actTokens)
 	}
 }
@@ -1113,7 +1113,7 @@ func TestJoinInLeavingState(t *testing.T) {
 	cfg.MinReadyDuration = 1 * time.Nanosecond
 
 	// Set state as LEAVING
-	err = r.KVClient.CAS(context.Background(), ringKey, func(in interface{}) (interface{}, bool, error) {
+	err = r.KVClient.CAS(context.Background(), ringKey, func(in any) (any, bool, error) {
 		r := &Desc{
 			Ingesters: map[string]InstanceDesc{
 				"ing1": {
@@ -1135,7 +1135,7 @@ func TestJoinInLeavingState(t *testing.T) {
 	require.NoError(t, services.StartAndAwaitRunning(context.Background(), l1))
 
 	// Check that the lifecycler was able to join after coming up in LEAVING
-	test.Poll(t, 1000*time.Millisecond, true, func() interface{} {
+	test.Poll(t, 1000*time.Millisecond, true, func() any {
 		d, err := r.KVClient.Get(context.Background(), ringKey)
 		require.NoError(t, err)
 
@@ -1166,7 +1166,7 @@ func TestJoinInLeavingStateAndLessTokens(t *testing.T) {
 	cfg.MinReadyDuration = 1 * time.Nanosecond
 
 	// Set state as LEAVING and 1 less token because of conflict resolution
-	err = r.KVClient.CAS(context.Background(), ringKey, func(in interface{}) (interface{}, bool, error) {
+	err = r.KVClient.CAS(context.Background(), ringKey, func(in any) (any, bool, error) {
 		r := &Desc{
 			Ingesters: map[string]InstanceDesc{
 				"ing1": {
@@ -1188,7 +1188,7 @@ func TestJoinInLeavingStateAndLessTokens(t *testing.T) {
 	require.NoError(t, services.StartAndAwaitRunning(context.Background(), l1))
 
 	// Check that the lifecycler was able to join after coming up in LEAVING
-	test.Poll(t, 1000*time.Millisecond, true, func() interface{} {
+	test.Poll(t, 1000*time.Millisecond, true, func() any {
 		d, err := r.KVClient.Get(context.Background(), ringKey)
 		require.NoError(t, err)
 
@@ -1222,7 +1222,7 @@ func TestJoinInJoiningState(t *testing.T) {
 	instance2RegisteredAt := time.Now().Add(-2 * time.Hour)
 
 	// Set state as JOINING
-	err = r.KVClient.CAS(context.Background(), ringKey, func(in interface{}) (interface{}, bool, error) {
+	err = r.KVClient.CAS(context.Background(), ringKey, func(in any) (any, bool, error) {
 		r := &Desc{
 			Ingesters: map[string]InstanceDesc{
 				"ing1": {
@@ -1246,7 +1246,7 @@ func TestJoinInJoiningState(t *testing.T) {
 	require.NoError(t, services.StartAndAwaitRunning(context.Background(), l1))
 
 	// Check that the lifecycler was able to join after coming up in JOINING
-	test.Poll(t, 1000*time.Millisecond, true, func() interface{} {
+	test.Poll(t, 1000*time.Millisecond, true, func() any {
 		d, err := r.KVClient.Get(context.Background(), ringKey)
 		require.NoError(t, err)
 
@@ -1282,7 +1282,7 @@ func TestRestoreOfZoneWhenOverwritten(t *testing.T) {
 	cfg := testLifecyclerConfig(ringConfig, "ing1")
 
 	// Set ing1 to not have a zone
-	err = r.KVClient.CAS(context.Background(), ringKey, func(in interface{}) (interface{}, bool, error) {
+	err = r.KVClient.CAS(context.Background(), ringKey, func(in any) (any, bool, error) {
 		r := &Desc{
 			Ingesters: map[string]InstanceDesc{
 				"ing1": {
@@ -1305,7 +1305,7 @@ func TestRestoreOfZoneWhenOverwritten(t *testing.T) {
 	require.NoError(t, services.StartAndAwaitRunning(context.Background(), l1))
 
 	// Check that the lifecycler was able to reset the zone value to the expected setting
-	test.Poll(t, 1000*time.Millisecond, true, func() interface{} {
+	test.Poll(t, 1000*time.Millisecond, true, func() any {
 		d, err := r.KVClient.Get(context.Background(), ringKey)
 		require.NoError(t, err)
 		desc, ok := d.(*Desc)
@@ -1318,7 +1318,7 @@ func TestRestoreOfZoneWhenOverwritten(t *testing.T) {
 }
 
 func waitRingInstance(t *testing.T, timeout time.Duration, l *Lifecycler, check func(instance InstanceDesc) error) {
-	test.Poll(t, timeout, nil, func() interface{} {
+	test.Poll(t, timeout, nil, func() any {
 		desc, err := l.KVStore.Get(context.Background(), l.RingKey)
 		if err != nil {
 			return err
diff --git a/pkg/ring/model.go b/pkg/ring/model.go
index 70b767740e..82d0f9ccb3 100644
--- a/pkg/ring/model.go
+++ b/pkg/ring/model.go
@@ -3,6 +3,7 @@ package ring
 import (
 	"container/heap"
 	"fmt"
+	"maps"
 	"sort"
 	"sync"
 	"time"
@@ -355,7 +356,7 @@ func tokensEqual(lhs, rhs []uint32) bool {
 	if len(lhs) != len(rhs) {
 		return false
 	}
-	for i := 0; i < len(lhs); i++ {
+	for i := range lhs {
 		if lhs[i] != rhs[i] {
 			return false
 		}
@@ -363,7 +364,7 @@ func tokensEqual(lhs, rhs []uint32) bool {
 	return true
 }
 
-var tokenMapPool = sync.Pool{New: func() interface{} { return make(map[uint32]struct{}) }}
+var tokenMapPool = sync.Pool{New: func() any { return make(map[uint32]struct{}) }}
 
 func conflictingTokensExist(normalizedIngesters map[string]InstanceDesc) bool {
 	tokensMap := tokenMapPool.Get().(map[uint32]struct{})
@@ -472,7 +473,7 @@ func (d *Desc) RemoveTombstones(limit time.Time) (total, removed int) {
 }
 
 // Clone returns a deep copy of the ring state.
-func (d *Desc) Clone() interface{} {
+func (d *Desc) Clone() any {
 	return proto.Clone(d).(*Desc)
 }
 
@@ -626,7 +627,7 @@ func (d *Desc) RingCompare(o *Desc) CompareResult {
 	return Equal
 }
 
-func GetOrCreateRingDesc(d interface{}) *Desc {
+func GetOrCreateRingDesc(d any) *Desc {
 	if d == nil {
 		return NewDesc()
 	}
@@ -649,11 +650,11 @@ func (h TokensHeap) Less(i, j int) bool {
 	return h[i][0] < h[j][0]
 }
 
-func (h *TokensHeap) Push(x interface{}) {
+func (h *TokensHeap) Push(x any) {
 	*h = append(*h, x.([]uint32))
 }
 
-func (h *TokensHeap) Pop() interface{} {
+func (h *TokensHeap) Pop() any {
 	old := *h
 	n := len(old)
 	x := old[n-1]
@@ -709,8 +710,8 @@ func MergeTokensByZone(zones map[string][][]uint32) map[string][]uint32 {
 	return out
 }
 
-func (d *Desc) SplitByID() map[string]interface{} {
-	out := make(map[string]interface{}, len(d.Ingesters))
+func (d *Desc) SplitByID() map[string]any {
+	out := make(map[string]any, len(d.Ingesters))
 	for key := range d.Ingesters {
 		in := d.Ingesters[key]
 		out[key] = &in
@@ -718,7 +719,7 @@ func (d *Desc) SplitByID() map[string]interface{} {
 	return out
 }
 
-func (d *Desc) JoinIds(in map[string]interface{}) {
+func (d *Desc) JoinIds(in map[string]any) {
 	for key, value := range in {
 		d.Ingesters[key] = *(value.(*InstanceDesc))
 	}
@@ -728,7 +729,7 @@ func (d *Desc) GetItemFactory() proto.Message {
 	return &InstanceDesc{}
 }
 
-func (d *Desc) FindDifference(o codec.MultiKey) (interface{}, []string, error) {
+func (d *Desc) FindDifference(o codec.MultiKey) (any, []string, error) {
 	out, ok := o.(*Desc)
 	if !ok {
 		// This method only deals with non-nil rings.
@@ -754,9 +755,7 @@ func (d *Desc) FindDifference(o codec.MultiKey) (interface{}, []string, error) {
 
 	//If existent data is empty
 	if d == nil {
-		for key, value := range out.Ingesters {
-			toUpdated.Ingesters[key] = value
-		}
+		maps.Copy(toUpdated.Ingesters, out.Ingesters)
 		return toUpdated, toDelete, nil
 	}
 
diff --git a/pkg/ring/model_test.go b/pkg/ring/model_test.go
index f34b6e566d..16295ff354 100644
--- a/pkg/ring/model_test.go
+++ b/pkg/ring/model_test.go
@@ -48,7 +48,6 @@ func TestInstanceDesc_IsHealthy_ForIngesterOperations(t *testing.T) {
 	}
 
 	for testName, testData := range tests {
-		testData := testData
 		t.Run(testName, func(t *testing.T) {
 			actual := testData.ingester.IsHealthy(Write, testData.timeout, time.Now())
 
@@ -560,22 +559,22 @@ func TestMergeTokensByZone(t *testing.T) {
 func TestDesc_SplitById_JoinIds(t *testing.T) {
 	tests := map[string]struct {
 		ring  *Desc
-		split map[string]interface{}
+		split map[string]any
 	}{
 		"empty ring": {
 			ring:  &Desc{Ingesters: map[string]InstanceDesc{}},
-			split: map[string]interface{}{},
+			split: map[string]any{},
 		},
 		"single instance": {
 			ring:  &Desc{Ingesters: map[string]InstanceDesc{"ing1": {Addr: "addr1", Tokens: []uint32{1, 2, 3}, Timestamp: 123456, State: JOINING, Zone: "zone1", RegisteredTimestamp: 123}}},
-			split: map[string]interface{}{"ing1": &InstanceDesc{Addr: "addr1", Tokens: []uint32{1, 2, 3}, Timestamp: 123456, State: JOINING, Zone: "zone1", RegisteredTimestamp: 123}},
+			split: map[string]any{"ing1": &InstanceDesc{Addr: "addr1", Tokens: []uint32{1, 2, 3}, Timestamp: 123456, State: JOINING, Zone: "zone1", RegisteredTimestamp: 123}},
		},
 		"two instances": {
 			ring: &Desc{Ingesters: map[string]InstanceDesc{
 				"ing1": {Addr: "addr1", Tokens: []uint32{1, 2, 3}, Timestamp: 123456, State: JOINING, Zone: "zone1", RegisteredTimestamp: 123},
 				"ing2": {Addr: "addr2", Tokens: []uint32{3, 4, 5}, Timestamp: 5678, State: ACTIVE, Zone: "zone2", RegisteredTimestamp: 567},
 			}},
-			split: map[string]interface{}{
+			split: map[string]any{
 				"ing1": &InstanceDesc{Addr: "addr1", Tokens: []uint32{1, 2, 3}, Timestamp: 123456, State: JOINING, Zone: "zone1", RegisteredTimestamp: 123},
 				"ing2": &InstanceDesc{Addr: "addr2", Tokens: []uint32{3, 4, 5}, Timestamp: 5678, State: ACTIVE, Zone: "zone2", RegisteredTimestamp: 567},
 			},
@@ -612,8 +611,8 @@ func TestDesc_FindDifference(t *testing.T) {
 	tests := map[string]struct {
 		r1       *Desc
 		r2       *Desc
-		toUpdate interface{}
-		toDelete interface{}
+		toUpdate any
+		toDelete any
 	}{
 		"nil rings": {
 			r1: nil,
diff --git a/pkg/ring/replication_set.go b/pkg/ring/replication_set.go
index 31e4dc016f..c534d919bb 100644
--- a/pkg/ring/replication_set.go
+++ b/pkg/ring/replication_set.go
@@ -27,9 +27,9 @@ type ReplicationSet struct {
 // Do function f in parallel for all replicas in the set, erroring is we exceed
 // MaxErrors and returning early otherwise. zoneResultsQuorum allows only include
 // results from zones that already reach quorum to improve performance.
-func (r ReplicationSet) Do(ctx context.Context, delay time.Duration, zoneResultsQuorum bool, partialDataEnabled bool, f func(context.Context, *InstanceDesc) (interface{}, error)) ([]interface{}, error) {
+func (r ReplicationSet) Do(ctx context.Context, delay time.Duration, zoneResultsQuorum bool, partialDataEnabled bool, f func(context.Context, *InstanceDesc) (any, error)) ([]any, error) {
 	type instanceResult struct {
-		res      interface{}
+		res      any
 		err      error
 		instance *InstanceDesc
 	}
@@ -180,7 +180,7 @@ func hasReplicationSetChangedExcluding(before, after ReplicationSet, exclude fun
 	sort.Sort(ByAddr(beforeInstances))
 	sort.Sort(ByAddr(afterInstances))
 
-	for i := 0; i < len(beforeInstances); i++ {
+	for i := range beforeInstances {
 		b := beforeInstances[i]
 		a := afterInstances[i]
 
diff --git a/pkg/ring/replication_set_test.go b/pkg/ring/replication_set_test.go
index 401ec7d409..90dbd2b9fd 100644
--- a/pkg/ring/replication_set_test.go
+++ b/pkg/ring/replication_set_test.go
@@ -3,6 +3,7 @@ package ring
 import (
 	"context"
 	"errors"
+	"slices"
 	"testing"
 	"time"
 
@@ -90,9 +91,9 @@ var (
 )
 
 // Return a function that fails starting from failAfter times
-func failingFunctionAfter(failAfter int32, delay time.Duration) func(context.Context, *InstanceDesc) (interface{}, error) {
+func failingFunctionAfter(failAfter int32, delay time.Duration) func(context.Context, *InstanceDesc) (any, error) {
 	count := atomic.NewInt32(0)
-	return func(context.Context, *InstanceDesc) (interface{}, error) {
+	return func(context.Context, *InstanceDesc) (any, error) {
 		time.Sleep(delay)
 		if count.Inc() > failAfter {
 			return nil, errFailure
@@ -101,12 +102,10 @@ func failingFunctionAfter(failAfter int32, delay time.Duration) func(context.Con
 	}
 }
 
-func failingFunctionOnZones(zones ...string) func(context.Context, *InstanceDesc) (interface{}, error) {
-	return func(ctx context.Context, ing *InstanceDesc) (interface{}, error) {
-		for _, zone := range zones {
-			if ing.Zone == zone {
-				return nil, errZoneFailure
-			}
+func failingFunctionOnZones(zones ...string) func(context.Context, *InstanceDesc) (any, error) {
+	return func(ctx context.Context, ing *InstanceDesc) (any, error) {
+		if slices.Contains(zones, ing.Zone) {
+			return nil, errZoneFailure
 		}
 		return 1, nil
 	}
@@ -118,10 +117,10 @@ func TestReplicationSet_Do(t *testing.T) {
 		instances           []InstanceDesc
 		maxErrors           int
 		maxUnavailableZones int
-		f                   func(context.Context, *InstanceDesc) (interface{}, error)
+		f                   func(context.Context, *InstanceDesc) (any, error)
 		delay               time.Duration
 		cancelContextDelay  time.Duration
-		want                []interface{}
+		want                []any
 		expectedError       error
 		zoneResultsQuorum   bool
 		queryPartialData    bool
@@ -132,15 +131,15 @@ func TestReplicationSet_Do(t *testing.T) {
 			instances: []InstanceDesc{
 				{},
 			},
-			f: func(c context.Context, id *InstanceDesc) (interface{}, error) {
+			f: func(c context.Context, id *InstanceDesc) (any, error) {
 				return 1, nil
 			},
-			want: []interface{}{1},
+			want: []any{1},
 		},
 		{
 			name:      "max errors = 0, should fail on 1 error out of 1 instance",
 			instances: []InstanceDesc{{}},
-			f: func(c context.Context, id *InstanceDesc) (interface{}, error) {
+			f: func(c context.Context, id *InstanceDesc) (any, error) {
 				return nil, errFailure
 			},
 			want: nil,
@@ -166,7 +165,7 @@ func TestReplicationSet_Do(t *testing.T) {
 			name:      "max errors = 1, should handle context canceled",
 			instances: []InstanceDesc{{}, {}, {}},
 			maxErrors: 1,
-			f: func(c context.Context, id *InstanceDesc) (interface{}, error) {
+			f: func(c context.Context, id *InstanceDesc) (any, error) {
 				time.Sleep(300 * time.Millisecond)
 				return 1, nil
 			},
@@ -177,17 +176,17 @@ func TestReplicationSet_Do(t *testing.T) {
 		{
 			name:      "max errors = 0, should succeed on all successful instances",
 			instances: []InstanceDesc{{Zone: "zone1"}, {Zone: "zone2"}, {Zone: "zone3"}},
-			f: func(c context.Context, id *InstanceDesc) (interface{}, error) {
+			f: func(c context.Context, id *InstanceDesc) (any, error) {
 				return 1, nil
 			},
-			want: []interface{}{1, 1, 1},
+			want: []any{1, 1, 1},
 		},
 		{
 			name:                "max unavailable zones = 1, should succeed on instances failing in 1 out of 3 zones (3 instances)",
 			instances:           []InstanceDesc{{Zone: "zone1"}, {Zone: "zone2"}, {Zone: "zone3"}},
 			f:                   failingFunctionOnZones("zone1"),
 			maxUnavailableZones: 1,
-			want:                []interface{}{1, 1},
+			want:                []any{1, 1},
 		},
 		{
 			name: "max unavailable zones = 1, should fail on instances failing in 2 out of 3 zones (3 instances)",
@@ -199,7 +198,7 @@ func TestReplicationSet_Do(t *testing.T) {
 		{
 			name:      "with partial data enabled and max unavailable zones = 1, should succeed on instances failing in 2 out of 3 zones (6 instances)",
 			instances: []InstanceDesc{{Addr: "10.0.0.1", Zone: "zone1"}, {Addr: "10.0.0.2", Zone: "zone2"}, {Addr: "10.0.0.3", Zone: "zone3"}, {Addr: "10.0.0.4", Zone: "zone1"}, {Addr: "10.0.0.5", Zone: "zone2"}, {Addr: "10.0.0.6", Zone: "zone3"}},
-			f: func(ctx context.Context, ing *InstanceDesc) (interface{}, error) {
+			f: func(ctx context.Context, ing *InstanceDesc) (any, error) {
 				if ing.Addr == "10.0.0.1" || ing.Addr == "10.0.0.2" {
 					return nil, errZoneFailure
 				}
@@ -207,7 +206,7 @@ func TestReplicationSet_Do(t *testing.T) {
 			},
 			maxUnavailableZones: 1,
 			queryPartialData:    true,
-			want:                []interface{}{1, 1, 1, 1},
+			want:                []any{1, 1, 1, 1},
 			expectedError:       partialdata.ErrPartialData,
 			errStrContains:      []string{"10.0.0.1", "10.0.0.2", "zone failed"},
 		},
@@ -222,7 +221,7 @@ func TestReplicationSet_Do(t *testing.T) {
 		{
 			name:      "with partial data enabled, should fail on instances returning 422",
 			instances: []InstanceDesc{{Addr: "1", Zone: "zone1"}, {Addr: "2", Zone: "zone2"}, {Addr: "3", Zone: "zone3"}, {Addr: "4", Zone: "zone1"}, {Addr: "5", Zone: "zone2"}, {Addr: "6", Zone: "zone3"}},
-			f: func(ctx context.Context, ing *InstanceDesc) (interface{}, error) {
+			f: func(ctx context.Context, ing *InstanceDesc) (any, error) {
 				if ing.Addr == "1" || ing.Addr == "2" {
 					return nil, validation.LimitError("limit breached")
 				}
@@ -237,7 +236,7 @@ func TestReplicationSet_Do(t *testing.T) {
 			instances: []InstanceDesc{{Zone: "zone1"}, {Zone: "zone1"}, {Zone: "zone2"}, {Zone: "zone2"}, {Zone: "zone3"}, {Zone: "zone3"}},
 			f:         failingFunctionOnZones("zone1"),
 			maxUnavailableZones: 1,
-			want:                []interface{}{1, 1, 1, 1},
+			want:                []any{1, 1, 1, 1},
 		},
 		{
 			name: "max unavailable zones = 2, should fail on instances failing in 3 out of 5 zones (5 instances)",
@@ -251,16 +250,16 @@ func TestReplicationSet_Do(t *testing.T) {
 			instances: []InstanceDesc{{Zone: "zone1"}, {Zone: "zone1"}, {Zone: "zone2"}, {Zone: "zone2"}, {Zone: "zone3"}, {Zone: "zone3"}, {Zone: "zone4"}, {Zone: "zone4"}, {Zone: "zone5"}, {Zone: "zone5"}},
 			f:         failingFunctionOnZones("zone1", "zone5"),
 			maxUnavailableZones: 2,
-			want:                []interface{}{1, 1, 1, 1, 1, 1},
+			want:                []any{1, 1, 1, 1, 1, 1},
 		},
 		{
 			name:      "max unavailable zones = 1, zoneResultsQuorum = true, should contain 4 results (2 from zone1, 2 from zone2)",
 			instances: []InstanceDesc{{Zone: "zone1"}, {Zone: "zone2"}, {Zone: "zone3"}, {Zone: "zone1"}, {Zone: "zone2"}, {Zone: "zone3"}},
-			f: func(c context.Context, id *InstanceDesc) (interface{}, error) {
+			f: func(c context.Context, id *InstanceDesc) (any, error) {
 				return 1, nil
 			},
 			maxUnavailableZones: 1,
-			want:                []interface{}{1, 1, 1, 1},
+			want:                []any{1, 1, 1, 1},
 			zoneResultsQuorum:   true,
 		},
 	}
diff --git a/pkg/ring/replication_set_tracker.go b/pkg/ring/replication_set_tracker.go
index bc7401240d..0ea465cfda 100644
--- a/pkg/ring/replication_set_tracker.go
+++ b/pkg/ring/replication_set_tracker.go
@@ -8,7 +8,7 @@ type replicationSetResultTracker interface {
 	// Signals an instance has done the execution, either successful (no error)
 	// or failed (with error). If successful, result will be recorded and can
 	// be accessed via getResults.
-	done(instance *InstanceDesc, result interface{}, err error)
+	done(instance *InstanceDesc, result any, err error)
 
 	// Returns true if all instances are done executing
 	finished() bool
@@ -23,7 +23,7 @@ type replicationSetResultTracker interface {
 	failedCompletely() bool
 
 	// Returns recorded results.
- getResults() []interface{} + getResults() []any // Returns errors getErrors() []error @@ -34,7 +34,7 @@ type defaultResultTracker struct { numSucceeded int numErrors int maxErrors int - results []interface{} + results []any numInstances int errors []error } @@ -46,12 +46,12 @@ func newDefaultResultTracker(instances []InstanceDesc, maxErrors int) *defaultRe numErrors: 0, maxErrors: maxErrors, errors: make([]error, 0, len(instances)), - results: make([]interface{}, 0, len(instances)), + results: make([]any, 0, len(instances)), numInstances: len(instances), } } -func (t *defaultResultTracker) done(instance *InstanceDesc, result interface{}, err error) { +func (t *defaultResultTracker) done(instance *InstanceDesc, result any, err error) { if err == nil { t.numSucceeded++ t.results = append(t.results, result) @@ -77,7 +77,7 @@ func (t *defaultResultTracker) failedCompletely() bool { return t.numInstances == t.numErrors } -func (t *defaultResultTracker) getResults() []interface{} { +func (t *defaultResultTracker) getResults() []any { return t.results } @@ -92,7 +92,7 @@ type zoneAwareResultTracker struct { failuresByZone map[string]int minSuccessfulZones int maxUnavailableZones int - resultsPerZone map[string][]interface{} + resultsPerZone map[string][]any numInstances int zoneResultsQuorum bool zoneCount int @@ -114,13 +114,13 @@ func newZoneAwareResultTracker(instances []InstanceDesc, maxUnavailableZones int t.waitingByZone[instance.Zone]++ } t.minSuccessfulZones = len(t.waitingByZone) - maxUnavailableZones - t.resultsPerZone = make(map[string][]interface{}, len(t.waitingByZone)) + t.resultsPerZone = make(map[string][]any, len(t.waitingByZone)) t.zoneCount = len(t.waitingByZone) return t } -func (t *zoneAwareResultTracker) done(instance *InstanceDesc, result interface{}, err error) { +func (t *zoneAwareResultTracker) done(instance *InstanceDesc, result any, err error) { if err != nil { t.failuresByZone[instance.Zone]++ t.errors = append(t.errors, fmt.Errorf("(%s, %s) %w", instance.GetAddr(), instance.GetZone(), err)) @@ -128,7 +128,7 @@ func (t *zoneAwareResultTracker) done(instance *InstanceDesc, result interface{} if _, ok := t.resultsPerZone[instance.Zone]; !ok { // If it is the first result in the zone, then total number of instances // in this zone should be number of waiting required. 
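// Editor's worked example of the quorum arithmetic set up in the constructor
// above: with 6 instances spread evenly over 3 zones and
// maxUnavailableZones = 1, waitingByZone starts at 2 per zone and
// minSuccessfulZones = 3 - 1 = 2, so the tracker can tolerate every instance
// of one whole zone failing, which matches the "max unavailable zones = 1"
// cases in the tests below.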
- t.resultsPerZone[instance.Zone] = make([]interface{}, 0, t.waitingByZone[instance.Zone]) + t.resultsPerZone[instance.Zone] = make([]any, 0, t.waitingByZone[instance.Zone]) } t.resultsPerZone[instance.Zone] = append(t.resultsPerZone[instance.Zone], result) } @@ -167,8 +167,8 @@ func (t *zoneAwareResultTracker) failedCompletely() bool { return allZonesFailed || (t.failed() && atLeastHalfOfFleetFailed) } -func (t *zoneAwareResultTracker) getResults() []interface{} { - results := make([]interface{}, 0, t.numInstances) +func (t *zoneAwareResultTracker) getResults() []any { + results := make([]any, 0, t.numInstances) if t.zoneResultsQuorum { for zone, waiting := range t.waitingByZone { // No need to check failuresByZone since tracker diff --git a/pkg/ring/replication_set_tracker_test.go b/pkg/ring/replication_set_tracker_test.go index e5ee5c9de1..b0e062e308 100644 --- a/pkg/ring/replication_set_tracker_test.go +++ b/pkg/ring/replication_set_tracker_test.go @@ -130,7 +130,7 @@ func TestDefaultResultTracker(t *testing.T) { assert.True(t, tracker.succeeded()) assert.False(t, tracker.failed()) - assert.Equal(t, []interface{}{1, 2, 3}, tracker.getResults()) + assert.Equal(t, []any{1, 2, 3}, tracker.getResults()) }, }, "record and getResults2": { @@ -152,7 +152,7 @@ func TestDefaultResultTracker(t *testing.T) { assert.True(t, tracker.succeeded()) assert.False(t, tracker.failed()) - assert.Equal(t, []interface{}{[]int{1, 1, 1}, []int{2, 2, 2}, []int{3, 3, 3}}, tracker.getResults()) + assert.Equal(t, []any{[]int{1, 1, 1}, []int{2, 2, 2}, []int{3, 3, 3}}, tracker.getResults()) }, }, "failedCompletely() should return true only if all instances have failed, regardless of max errors": { @@ -249,7 +249,7 @@ func TestZoneAwareResultTracker(t *testing.T) { assert.True(t, tracker.succeeded()) assert.False(t, tracker.failed()) - assert.Equal(t, []interface{}{1, 1, 1}, tracker.getResults()) + assert.Equal(t, []any{1, 1, 1}, tracker.getResults()) }, }, "should succeed once all 6 instances succeed on max unavailable zones = 0": { @@ -283,7 +283,7 @@ func TestZoneAwareResultTracker(t *testing.T) { assert.True(t, tracker.succeeded()) assert.False(t, tracker.failed()) - assert.Equal(t, []interface{}{1, 1, 1, 1, 1, 1}, tracker.getResults()) + assert.Equal(t, []any{1, 1, 1, 1, 1, 1}, tracker.getResults()) }, }, "should succeed once all 5 instances succeed on max unavailable zones = 1, zone results quorum disabled": { @@ -314,7 +314,7 @@ func TestZoneAwareResultTracker(t *testing.T) { assert.True(t, tracker.succeeded()) assert.False(t, tracker.failed()) - assert.Equal(t, []interface{}{1, 1, 1, 1, 1}, tracker.getResults()) + assert.Equal(t, []any{1, 1, 1, 1, 1}, tracker.getResults()) }, }, "should succeed once all 5 instances succeed on max unavailable zones = 1, zone results quorum enabled": { @@ -345,7 +345,7 @@ func TestZoneAwareResultTracker(t *testing.T) { assert.True(t, tracker.succeeded()) assert.False(t, tracker.failed()) - assert.Equal(t, []interface{}{1, 1, 1, 1}, tracker.getResults()) + assert.Equal(t, []any{1, 1, 1, 1}, tracker.getResults()) }, }, "should fail on 1st failing instance on max unavailable zones = 0": { diff --git a/pkg/ring/ring.go b/pkg/ring/ring.go index 92c343d684..6b121adea0 100644 --- a/pkg/ring/ring.go +++ b/pkg/ring/ring.go @@ -8,6 +8,7 @@ import ( "fmt" "math" "math/rand" + "slices" "sync" "time" @@ -19,7 +20,6 @@ import ( "github.com/cortexproject/cortex/pkg/ring/kv" shardUtil "github.com/cortexproject/cortex/pkg/ring/shard" - "github.com/cortexproject/cortex/pkg/util" 
"github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -307,7 +307,7 @@ func (r *Ring) loop(ctx context.Context) error { r.updateRingMetrics(Different) r.mtx.Unlock() - r.KVClient.WatchKey(ctx, r.key, func(value interface{}) bool { + r.KVClient.WatchKey(ctx, r.key, func(value any) bool { if value == nil { level.Info(r.logger).Log("msg", "ring doesn't exist in KV store yet") return true @@ -327,7 +327,7 @@ func (r *Ring) updateRingState(ringDesc *Desc) { // Filter out all instances belonging to excluded zones. if len(r.cfg.ExcludedZones) > 0 { for instanceID, instance := range ringDesc.Ingesters { - if util.StringsContain(r.cfg.ExcludedZones, instance.Zone) { + if slices.Contains(r.cfg.ExcludedZones, instance.Zone) { delete(ringDesc.Ingesters, instanceID) } } @@ -411,7 +411,7 @@ func (r *Ring) Get(key uint32, op Operation, bufDescs []InstanceDesc, bufHosts [ } // We want n *distinct* instances. - if util.StringsContain(distinctHosts, info.InstanceID) { + if slices.Contains(distinctHosts, info.InstanceID) { continue } @@ -589,10 +589,7 @@ func (r *Ring) GetReplicationSetForOperation(op Operation) (ReplicationSet, erro } else { // Calculate the number of required instances; // ensure we always require at least RF-1 when RF=3. - numRequired := len(r.ringDesc.Ingesters) - if numRequired < r.cfg.ReplicationFactor { - numRequired = r.cfg.ReplicationFactor - } + numRequired := max(len(r.ringDesc.Ingesters), r.cfg.ReplicationFactor) // We can tolerate this many failures numRequired -= r.cfg.ReplicationFactor / 2 diff --git a/pkg/ring/ring_test.go b/pkg/ring/ring_test.go index 682cb7d942..55d5ff1058 100644 --- a/pkg/ring/ring_test.go +++ b/pkg/ring/ring_test.go @@ -6,6 +6,7 @@ import ( "fmt" "math" "math/rand" + "slices" "sort" "strconv" "strings" @@ -47,7 +48,7 @@ func benchmarkBatch(b *testing.B, g TokenGenerator, numInstances, numKeys int) { // Make a random ring with N instances, and M tokens per ingests desc := NewDesc() ring := &Desc{} - for i := 0; i < numInstances; i++ { + for i := range numInstances { tokens := g.GenerateTokens(ring, strconv.Itoa(i), "zone", numTokens, true) desc.AddIngester(fmt.Sprintf("%d", i), fmt.Sprintf("instance-%d", i), strconv.Itoa(i), tokens, ACTIVE, time.Now()) } @@ -88,9 +89,8 @@ func benchmarkBatch(b *testing.B, g TokenGenerator, numInstances, numKeys int) { for n, c := range tc { b.Run(n, func(b *testing.B) { // Generate a batch of N random keys, and look them up - b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { generateKeys(rnd, numKeys, keys) err := DoBatch(ctx, Write, &r, c.exe, keys, callback, cleanup) require.NoError(b, err) @@ -100,7 +100,7 @@ func benchmarkBatch(b *testing.B, g TokenGenerator, numInstances, numKeys int) { } func generateKeys(r *rand.Rand, numTokens int, dest []uint32) { - for i := 0; i < numTokens; i++ { + for i := range numTokens { dest[i] = r.Uint32() } } @@ -136,7 +136,7 @@ func benchmarkUpdateRingState(b *testing.B, g TokenGenerator, numInstances, numZ // Also make a copy with different timestamps and one with different tokens desc := NewDesc() otherDesc := NewDesc() - for i := 0; i < numInstances; i++ { + for i := range numInstances { id := fmt.Sprintf("%d", i) tokens := g.GenerateTokens(desc, id, "zone", numTokens, true) now := time.Now() @@ -156,8 +156,8 @@ func benchmarkUpdateRingState(b *testing.B, g TokenGenerator, numInstances, numZ } flipFlop := true - b.ResetTimer() - for n := 0; n < b.N; n++ { + + for b.Loop() { if flipFlop { 
ring.updateRingState(desc) } else { @@ -285,7 +285,7 @@ func TestRing_Get_ZoneAwarenessWithIngesterLeaving(t *testing.T) { // Use the GenerateTokens to get an array of random uint32 values. testValues := g.GenerateTokens(r, "", "", testCount, true) - for i := 0; i < testCount; i++ { + for i := range testCount { set, err := ring.Get(testValues[i], Write, instancesList, bufHosts, bufZones) require.NoError(t, err) @@ -362,7 +362,7 @@ func TestRing_Get_ZoneAwarenessWithIngesterJoining(t *testing.T) { // Use the GenerateTokens to get an array of random uint32 values. testValues := g.GenerateTokens(ring.ringDesc, "", "", testCount, true) - for i := 0; i < testCount; i++ { + for i := range testCount { set, err := ring.Get(testValues[i], Write, instancesList, bufHosts, bufZones) require.NoError(t, err) @@ -467,7 +467,7 @@ func TestRing_Get_ZoneAwareness(t *testing.T) { var set ReplicationSet var err error - for i := 0; i < testCount; i++ { + for i := range testCount { set, err = ring.Get(testValues[i], Write, instances, bufHosts, bufZones) if testData.expectedErr != "" { require.EqualError(t, err, testData.expectedErr) @@ -557,12 +557,12 @@ func TestRing_Get_Stability(t *testing.T) { KVClient: &MockClient{}, } - for i := 0; i < numOfTokensToTest; i++ { + for i := range numOfTokensToTest { expectedSet, err := ring.Get(testValues[i], Write, bufDescs, bufHosts, bufZones) assert.NoError(t, err) assert.Equal(t, testData.replicationFactor, len(expectedSet.Instances)) - for j := 0; j < numOfInvocations; j++ { + for range numOfInvocations { newSet, err := ring.Get(testValues[i], Write, bufDescs, bufHosts, bufZones) assert.NoError(t, err) assert.Equal(t, expectedSet, newSet) @@ -687,7 +687,7 @@ func TestRing_Get_Consistency(t *testing.T) { ringDesc := &Desc{Ingesters: generateRingInstances(testData.initialInstances, testData.numZones, 128)} testValues := g.GenerateTokens(ringDesc, "", "", 128, true) bufDescs, bufHosts, bufZones := MakeBuffersForGet() - for i := 0; i < 128; i++ { + for i := range 128 { ring := Ring{ cfg: Config{ HeartbeatTimeout: time.Hour, @@ -971,9 +971,6 @@ func TestRing_GetAllHealthy(t *testing.T) { t.Run(testName, func(t *testing.T) { // Init the ring. ringDesc := &Desc{Ingesters: testData.ringInstances} - for id, instance := range ringDesc.Ingesters { - ringDesc.Ingesters[id] = instance - } ring := Ring{ cfg: Config{HeartbeatTimeout: heartbeatTimeout}, @@ -1193,9 +1190,6 @@ func TestRing_GetReplicationSetForOperation(t *testing.T) { t.Run(testName, func(t *testing.T) { // Init the ring. ringDesc := &Desc{Ingesters: testData.ringInstances} - for id, instance := range ringDesc.Ingesters { - ringDesc.Ingesters[id] = instance - } ring := Ring{ cfg: Config{ @@ -1782,7 +1776,7 @@ func TestRing_ShuffleShard_Stability(t *testing.T) { require.NoError(t, err) // Assert that multiple invocations generate the same exact shard. - for n := 0; n < numInvocations; n++ { + for range numInvocations { r := ring.ShuffleShard(tenantID, size) actual, err := r.GetAllHealthy(Read) require.NoError(t, err) @@ -1814,7 +1808,7 @@ func TestRing_ShuffleShard_Shuffling(t *testing.T) { // Initialise the ring instances. To have stable tests we generate tokens using a linear // distribution. Tokens within the same zone are evenly distributed too. 
instances := make(map[string]InstanceDesc, numInstances) - for i := 0; i < numInstances; i++ { + for i := range numInstances { id := fmt.Sprintf("instance-%d", i) instances[id] = InstanceDesc{ Addr: fmt.Sprintf("127.0.0.%d", i), @@ -1871,7 +1865,7 @@ func TestRing_ShuffleShard_Shuffling(t *testing.T) { numMatching := 0 for _, c := range currShard { - if util.StringsContain(otherShard, c) { + if slices.Contains(otherShard, c) { numMatching++ } } @@ -1944,7 +1938,7 @@ func TestRing_ShuffleShard_Consistency(t *testing.T) { // Compute the initial shard for each tenant. initial := map[int]ReplicationSet{} - for id := 0; id < numTenants; id++ { + for id := range numTenants { set, err := ring.ShuffleShard(fmt.Sprintf("%d", id), s.shardSize).GetAllHealthy(Read) require.NoError(t, err) initial[id] = set @@ -1971,7 +1965,7 @@ func TestRing_ShuffleShard_Consistency(t *testing.T) { // Compute the update shard for each tenant and compare it with the initial one. // If the "consistency" property is guaranteed, we expect no more then 1 different instance // in the updated shard. - for id := 0; id < numTenants; id++ { + for id := range numTenants { updated, err := ring.ShuffleShard(fmt.Sprintf("%d", id), s.shardSize).GetAllHealthy(Read) require.NoError(t, err) @@ -1986,7 +1980,7 @@ func TestRing_ShuffleShard_Consistency(t *testing.T) { func TestRing_ShuffleShard_ConsistencyOnShardSizeChanged(t *testing.T) { // Create 30 instances in 3 zones. ringInstances := map[string]InstanceDesc{} - for i := 0; i < 30; i++ { + for i := range 30 { name, desc := generateRingInstance(i, i%3, 128) ringInstances[name] = desc } @@ -2067,7 +2061,7 @@ func TestRing_ShuffleShard_ConsistencyOnShardSizeChanged(t *testing.T) { func TestRing_ShuffleShardWithZoneStability_ConsistencyOnShardSizeChanged(t *testing.T) { // Create 300 instances in 3 zones. ringInstances := map[string]InstanceDesc{} - for i := 0; i < 300; i++ { + for i := range 300 { name, desc := generateRingInstance(i, i%3, 128) ringInstances[name] = desc } @@ -2129,7 +2123,7 @@ func TestRing_ShuffleShardWithZoneStability_ConsistencyOnShardSizeChanged(t *tes func TestRing_ShuffleShard_ConsistencyOnZonesChanged(t *testing.T) { // Create 20 instances in 2 zones. ringInstances := map[string]InstanceDesc{} - for i := 0; i < 20; i++ { + for i := range 20 { name, desc := generateRingInstance(i, i%2, 128) ringInstances[name] = desc } @@ -2206,7 +2200,7 @@ func TestRing_ShuffleShard_ConsistencyOnZonesChanged(t *testing.T) { func TestRing_ShuffleShardWithZoneStability_ConsistencyOnZonesChanged(t *testing.T) { // Create 20 instances in 2 zones. ringInstances := map[string]InstanceDesc{} - for i := 0; i < 20; i++ { + for i := range 20 { name, desc := generateRingInstance(i, i%2, 128) ringInstances[name] = desc } @@ -2600,9 +2594,6 @@ func TestRing_ShuffleShardWithReadOnlyIngesters(t *testing.T) { t.Run(testName, func(t *testing.T) { // Init the ring. 
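// Editor's note on the removal just below (and the two identical ones
// earlier in this file): the deleted loop re-assigned each map entry to
// itself, ringDesc.Ingesters[id] = instance, which is a no-op, so dropping
// it changes no behavior.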
ringDesc := &Desc{Ingesters: testData.ringInstances} - for id, instance := range ringDesc.Ingesters { - ringDesc.Ingesters[id] = instance - } ring := Ring{ cfg: Config{ @@ -2823,9 +2814,7 @@ func benchmarkShuffleSharding(b *testing.B, numInstances, numZones, numTokens, s KVClient: &MockClient{}, } - b.ResetTimer() - - for n := 0; n < b.N; n++ { + for b.Loop() { ring.ShuffleShard("tenant-1", shardSize) } } @@ -2855,9 +2844,7 @@ func BenchmarkRing_Get(b *testing.B) { buf, bufHosts, bufZones := MakeBuffersForGet() r := rand.New(rand.NewSource(time.Now().UnixNano())) - b.ResetTimer() - - for n := 0; n < b.N; n++ { + for b.Loop() { set, err := ring.Get(r.Uint32(), Write, buf, bufHosts, bufZones) if err != nil || len(set.Instances) != replicationFactor { b.Fatal() @@ -3007,7 +2994,7 @@ func TestRingUpdates(t *testing.T) { } // Ensure the ring client got updated. - test.Poll(t, 1*time.Second, testData.expectedInstances, func() interface{} { + test.Poll(t, 1*time.Second, testData.expectedInstances, func() any { return ring.InstancesCount() }) @@ -3024,7 +3011,7 @@ func TestRingUpdates(t *testing.T) { // Ensure there's no instance in an excluded zone. if len(testData.excludedZones) > 0 { - assert.False(t, util.StringsContain(testData.excludedZones, ing.Zone)) + assert.False(t, slices.Contains(testData.excludedZones, ing.Zone)) } } @@ -3034,7 +3021,7 @@ func TestRingUpdates(t *testing.T) { } // Ensure the ring client got updated. - test.Poll(t, 1*time.Second, 0, func() interface{} { + test.Poll(t, 1*time.Second, 0, func() any { return ring.InstancesCount() }) }) @@ -3099,14 +3086,14 @@ func TestShuffleShardWithCaching(t *testing.T) { const zones = 3 lcs := []*Lifecycler(nil) - for i := 0; i < numLifecyclers; i++ { + for i := range numLifecyclers { lc := startLifecycler(t, cfg, 500*time.Millisecond, i, zones) lcs = append(lcs, lc) } // Wait until all instances in the ring are ACTIVE. - test.Poll(t, 5*time.Second, numLifecyclers, func() interface{} { + test.Poll(t, 5*time.Second, numLifecyclers, func() any { active := 0 rs, _ := ring.GetReplicationSetForOperation(Read) for _, ing := range rs.Instances { @@ -3127,7 +3114,7 @@ func TestShuffleShardWithCaching(t *testing.T) { // Do 100 iterations over two seconds. Make sure we get the same subring. const iters = 100 sleep := (2 * time.Second) / iters - for i := 0; i < iters; i++ { + for range iters { newSubring := ring.ShuffleShard(user, shardSize) require.True(t, subring == newSubring, "cached subring reused") require.Equal(t, shardSize, subring.InstancesCount()) @@ -3147,11 +3134,11 @@ func TestShuffleShardWithCaching(t *testing.T) { } // Now stop one lifecycler from each zone. Subring needs to be recomputed. 
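// Editor's note on the loop rewrites throughout this change: since Go 1.22 a
// range clause may iterate over an integer, so "for i := range n" counts
// i = 0, 1, ..., n-1 exactly like the classic "for i := 0; i < n; i++", and
// the index can be dropped entirely ("for range n") when it is unused.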
- for i := 0; i < zones; i++ { + for i := range zones { require.NoError(t, services.StopAndAwaitTerminated(context.Background(), lcs[i])) } - test.Poll(t, 5*time.Second, numLifecyclers-zones, func() interface{} { + test.Poll(t, 5*time.Second, numLifecyclers-zones, func() any { return ring.InstancesCount() }) diff --git a/pkg/ring/token_file_test.go b/pkg/ring/token_file_test.go index a456da5afe..51b347b7bb 100644 --- a/pkg/ring/token_file_test.go +++ b/pkg/ring/token_file_test.go @@ -12,7 +12,7 @@ import ( func TestTokenFile_Serialization(t *testing.T) { tokens := make(Tokens, 0, 512) - for i := 0; i < 512; i++ { + for range 512 { tokens = append(tokens, uint32(rand.Int31())) } tokenFile := TokenFile{ @@ -30,7 +30,7 @@ func TestTokenFile_Serialization(t *testing.T) { func TestTokenFile_Serialization_ForwardCompatibility(t *testing.T) { tokens := make(Tokens, 0, 512) - for i := 0; i < 512; i++ { + for range 512 { tokens = append(tokens, uint32(rand.Int31())) } b, err := oldMarshal(tokens) @@ -44,7 +44,7 @@ func TestTokenFile_Serialization_ForwardCompatibility(t *testing.T) { func TestTokenFile_Serialization_BackwardCompatibility(t *testing.T) { tokens := make(Tokens, 0, 512) - for i := 0; i < 512; i++ { + for range 512 { tokens = append(tokens, uint32(rand.Int31())) } tokenFile := TokenFile{ diff --git a/pkg/ring/token_generator.go b/pkg/ring/token_generator.go index 06d54cbfae..59f3db23a3 100644 --- a/pkg/ring/token_generator.go +++ b/pkg/ring/token_generator.go @@ -4,6 +4,7 @@ import ( "container/heap" "math" "math/rand" + "slices" "sort" "strings" "time" @@ -59,9 +60,7 @@ func (g *RandomTokenGenerator) GenerateTokens(ring *Desc, _, _ string, numTokens } // Ensure returned tokens are sorted. - sort.Slice(tokens, func(i, j int) bool { - return tokens[i] < tokens[j] - }) + slices.Sort(tokens) return tokens } @@ -235,9 +234,7 @@ func (g *MinimizeSpreadTokenGenerator) GenerateTokens(ring *Desc, id, zone strin } } - sort.Slice(r, func(i, j int) bool { - return r[i] < r[j] - }) + slices.Sort(r) return r } @@ -291,7 +288,7 @@ func tokenDistance(from, to uint32) int64 { } func findFirst(n int, f func(int) bool) int { - for i := 0; i < n; i++ { + for i := range n { if f(i) { return i } diff --git a/pkg/ring/token_generator_test.go b/pkg/ring/token_generator_test.go index 0c482f144d..a76826eb42 100644 --- a/pkg/ring/token_generator_test.go +++ b/pkg/ring/token_generator_test.go @@ -59,7 +59,7 @@ func TestGenerateTokens_IgnoresOldTokens(t *testing.T) { d := NewDesc() dups := make(map[uint32]bool) - for i := 0; i < 500; i++ { + for i := range 500 { id := strconv.Itoa(i) zone := strconv.Itoa(i % 3) tokens := tc.tg.GenerateTokens(d, id, zone, 500, true) @@ -91,7 +91,7 @@ func TestMinimizeSpreadTokenGenerator(t *testing.T) { require.Equal(t, mTokenGenerator.called, len(zones)) // Should Generate tokens based on the ring state - for i := 0; i < 50; i++ { + for i := range 50 { generateTokensForIngesters(t, rindDesc, fmt.Sprintf("minimize-%v", i), zones, minimizeTokenGenerator, dups) assertDistancePerIngester(t, rindDesc, 0.01) } diff --git a/pkg/ring/tokens.go b/pkg/ring/tokens.go index 28c57d460d..48accde1b1 100644 --- a/pkg/ring/tokens.go +++ b/pkg/ring/tokens.go @@ -21,7 +21,7 @@ func (t Tokens) Equals(other Tokens) bool { sort.Sort(mine) sort.Sort(other) - for i := 0; i < len(mine); i++ { + for i := range mine { if mine[i] != other[i] { return false } diff --git a/pkg/ruler/api.go b/pkg/ruler/api.go index 695e33954a..59e3303127 100644 --- a/pkg/ruler/api.go +++ b/pkg/ruler/api.go @@ -68,7 +68,7 @@ 
type RuleGroup struct { Limit int64 `json:"limit"` } -type rule interface{} +type rule any type alertingRule struct { // State can be "pending", "firing", "inactive". @@ -404,7 +404,7 @@ var ( ErrBadRuleGroup = errors.New("unable to decoded rule group") ) -func marshalAndSend(output interface{}, w http.ResponseWriter, logger log.Logger) { +func marshalAndSend(output any, w http.ResponseWriter, logger log.Logger) { d, err := yaml.Marshal(&output) if err != nil { level.Error(logger).Log("msg", "error marshalling yaml rule groups", "err", err) diff --git a/pkg/ruler/api_test.go b/pkg/ruler/api_test.go index e717ede2d0..2485f5b927 100644 --- a/pkg/ruler/api_test.go +++ b/pkg/ruler/api_test.go @@ -206,31 +206,31 @@ func Test_stripEvaluationFields(t *testing.T) { // stripEvaluationFields sets evaluation-related fields of a rules API response to zero values. func stripEvaluationFields(t *testing.T, r util_api.Response) { - dataMap, ok := r.Data.(map[string]interface{}) + dataMap, ok := r.Data.(map[string]any) if !ok { t.Fatalf("expected map[string]interface{} got %T", r.Data) } - groups, ok := dataMap["groups"].([]interface{}) + groups, ok := dataMap["groups"].([]any) if !ok { t.Fatalf("expected []interface{} got %T", dataMap["groups"]) } for i := range groups { - group, ok := groups[i].(map[string]interface{}) + group, ok := groups[i].(map[string]any) if !ok { t.Fatalf("expected map[string]interface{} got %T", groups[i]) } group["evaluationTime"] = 0 group["lastEvaluation"] = "0001-01-01T00:00:00Z" - rules, ok := group["rules"].([]interface{}) + rules, ok := group["rules"].([]any) if !ok { t.Fatalf("expected []interface{} got %T", group["rules"]) } for i := range rules { - rule, ok := rules[i].(map[string]interface{}) + rule, ok := rules[i].(map[string]any) if !ok { t.Fatalf("expected map[string]interface{} got %T", rules[i]) } diff --git a/pkg/ruler/client_pool_test.go b/pkg/ruler/client_pool_test.go index 66fe273a68..20b5b178aa 100644 --- a/pkg/ruler/client_pool_test.go +++ b/pkg/ruler/client_pool_test.go @@ -40,7 +40,7 @@ func Test_newRulerClientFactory(t *testing.T) { reg := prometheus.NewPedanticRegistry() factory := newRulerClientFactory(cfg, reg) - for i := 0; i < 2; i++ { + for range 2 { client, err := factory(listener.Addr().String()) require.NoError(t, err) defer client.Close() //nolint:errcheck diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index c8d8302e27..8357b6db76 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -4,10 +4,12 @@ import ( "context" "errors" "fmt" + "time" "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/google/uuid" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" @@ -27,6 +29,7 @@ import ( "github.com/cortexproject/cortex/pkg/ring/client" util_log "github.com/cortexproject/cortex/pkg/util/log" promql_util "github.com/cortexproject/cortex/pkg/util/promql" + "github.com/cortexproject/cortex/pkg/util/requestmeta" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -183,6 +186,10 @@ func EngineQueryFunc(engine promql.QueryEngine, frontendClient *frontendClient, } } + // Add request ID to the context so that it can be used in logs and metrics for split queries. 
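// Editor's aside, a hedged reading since the requestmeta helpers are not
// part of this diff: the calls below stamp a fresh uuid.NewString() value
// and a ruler source marker onto the query context, so that when the
// frontend later splits the query, the resulting sub-queries presumably
// share one request ID and can be correlated in logs.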
+ ctx = requestmeta.ContextWithRequestId(ctx, uuid.NewString()) + ctx = requestmeta.ContextWithRequestSource(ctx, requestmeta.SourceRuler) + if frontendClient != nil { v, err := frontendClient.InstantQuery(ctx, qs, t) if err != nil { @@ -267,12 +274,12 @@ func RecordAndReportRuleQueryMetrics(qf rules.QueryFunc, userID string, evalMetr queryChunkBytes.Add(float64(queryStats.FetchedChunkBytes)) queryDataBytes.Add(float64(queryStats.FetchedDataBytes)) // Log ruler query stats. - logMessage := []interface{}{ + logMessage := []any{ "msg", "query stats", "component", "ruler", } if origin := ctx.Value(promql.QueryOrigin{}); origin != nil { - queryLabels := origin.(map[string]interface{}) + queryLabels := origin.(map[string]any) rgMap := queryLabels["ruleGroup"].(map[string]string) logMessage = append(logMessage, "rule_group", rgMap["name"], diff --git a/pkg/ruler/external_labels.go b/pkg/ruler/external_labels.go index 886fc4d0ed..b0f2e4306b 100644 --- a/pkg/ruler/external_labels.go +++ b/pkg/ruler/external_labels.go @@ -20,7 +20,7 @@ func newUserExternalLabels(global labels.Labels, limits RulesLimits) *userExtern return &userExternalLabels{ global: global, limits: limits, - builder: labels.NewBuilder(nil), + builder: labels.NewBuilder(labels.EmptyLabels()), mtx: sync.Mutex{}, users: map[string]labels.Labels{}, @@ -41,9 +41,9 @@ func (e *userExternalLabels) update(userID string) (labels.Labels, bool) { defer e.mtx.Unlock() e.builder.Reset(e.global) - for _, l := range lset { + lset.Range(func(l labels.Label) { e.builder.Set(l.Name, l.Value) - } + }) lset = e.builder.Labels() if !labels.Equal(e.users[userID], lset) { diff --git a/pkg/ruler/external_labels_test.go b/pkg/ruler/external_labels_test.go index 45ff1507c8..86cc0bb153 100644 --- a/pkg/ruler/external_labels_test.go +++ b/pkg/ruler/external_labels_test.go @@ -22,7 +22,7 @@ func TestUserExternalLabels(t *testing.T) { name: "global labels only", removeBeforeTest: false, exists: false, - userExternalLabels: nil, + userExternalLabels: labels.EmptyLabels(), expectedExternalLabels: labels.FromStrings("from", "cortex"), }, { @@ -43,7 +43,6 @@ func TestUserExternalLabels(t *testing.T) { const userID = "test-user" for _, data := range tests { - data := data t.Run(data.name, func(t *testing.T) { if data.removeBeforeTest { e.remove(userID) diff --git a/pkg/ruler/frontend_client_pool.go b/pkg/ruler/frontend_client_pool.go index 7b131621aa..3884786138 100644 --- a/pkg/ruler/frontend_client_pool.go +++ b/pkg/ruler/frontend_client_pool.go @@ -4,18 +4,14 @@ import ( "time" "github.com/go-kit/log" - otgrpc "github.com/opentracing-contrib/go-grpc" - "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/weaveworks/common/httpgrpc" - "github.com/weaveworks/common/middleware" "google.golang.org/grpc" "google.golang.org/grpc/health/grpc_health_v1" "github.com/cortexproject/cortex/pkg/ring/client" "github.com/cortexproject/cortex/pkg/util/grpcclient" - cortexmiddleware "github.com/cortexproject/cortex/pkg/util/middleware" ) type frontendPool struct { @@ -55,11 +51,7 @@ func newFrontendPool(cfg Config, log log.Logger, reg prometheus.Registerer) *cli } func (f *frontendPool) createFrontendClient(addr string) (client.PoolClient, error) { - opts, err := f.grpcConfig.DialOption([]grpc.UnaryClientInterceptor{ - otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()), - middleware.ClientUserHeaderInterceptor, - 
cortexmiddleware.PrometheusGRPCUnaryInstrumentation(f.frontendClientRequestDuration), - }, nil) + opts, err := f.grpcConfig.DialOption(grpcclient.Instrument(f.frontendClientRequestDuration)) if err != nil { return nil, err } diff --git a/pkg/ruler/frontend_decoder.go b/pkg/ruler/frontend_decoder.go index 92a6b1a3f6..4086dceffb 100644 --- a/pkg/ruler/frontend_decoder.go +++ b/pkg/ruler/frontend_decoder.go @@ -5,7 +5,6 @@ import ( "encoding/json" "errors" "fmt" - "sort" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" @@ -76,20 +75,14 @@ func (j JsonDecoder) Decode(body []byte) (promql.Vector, Warnings, error) { func (j JsonDecoder) vectorToPromQLVector(vector model.Vector) promql.Vector { v := make([]promql.Sample, 0, len(vector)) for _, sample := range vector { - metric := make([]labels.Label, 0, len(sample.Metric)) + builder := labels.NewBuilder(labels.EmptyLabels()) for k, v := range sample.Metric { - metric = append(metric, labels.Label{ - Name: string(k), - Value: string(v), - }) + builder.Set(string(k), string(v)) } - sort.Slice(metric, func(i, j int) bool { - return metric[i].Name < metric[j].Name - }) v = append(v, promql.Sample{ T: int64(sample.Timestamp), F: float64(sample.Value), - Metric: metric, + Metric: builder.Labels(), }) } return v diff --git a/pkg/ruler/lifecycle_test.go b/pkg/ruler/lifecycle_test.go index 030d71f79d..bb09f6ef44 100644 --- a/pkg/ruler/lifecycle_test.go +++ b/pkg/ruler/lifecycle_test.go @@ -36,7 +36,7 @@ func TestRulerShutdown(t *testing.T) { defer services.StopAndAwaitTerminated(ctx, r) //nolint:errcheck // Wait until the tokens are registered in the ring - test.Poll(t, 100*time.Millisecond, config.Ring.NumTokens, func() interface{} { + test.Poll(t, 100*time.Millisecond, config.Ring.NumTokens, func() any { return numTokens(ringStore, "localhost", ringKey) }) @@ -45,7 +45,7 @@ func TestRulerShutdown(t *testing.T) { require.NoError(t, services.StopAndAwaitTerminated(context.Background(), r)) // Wait until the tokens are unregistered from the ring - test.Poll(t, 100*time.Millisecond, 0, func() interface{} { + test.Poll(t, 100*time.Millisecond, 0, func() any { return numTokens(ringStore, "localhost", ringKey) }) } @@ -73,7 +73,7 @@ func TestRuler_RingLifecyclerShouldAutoForgetUnhealthyInstances(t *testing.T) { // Add an unhealthy instance to the ring. tg := ring.NewRandomTokenGenerator() - require.NoError(t, ringStore.CAS(ctx, ringKey, func(in interface{}) (interface{}, bool, error) { + require.NoError(t, ringStore.CAS(ctx, ringKey, func(in any) (any, bool, error) { ringDesc := ring.GetOrCreateRingDesc(in) instance := ringDesc.AddIngester(unhealthyInstanceID, "1.1.1.1", "", tg.GenerateTokens(ringDesc, unhealthyInstanceID, "", config.Ring.NumTokens, true), ring.ACTIVE, time.Now()) @@ -84,7 +84,7 @@ func TestRuler_RingLifecyclerShouldAutoForgetUnhealthyInstances(t *testing.T) { })) // Ensure the unhealthy instance is removed from the ring. - test.Poll(t, time.Second*5, false, func() interface{} { + test.Poll(t, time.Second*5, false, func() any { d, err := ringStore.Get(ctx, ringKey) if err != nil { return err diff --git a/pkg/ruler/manager.go b/pkg/ruler/manager.go index 2f691abe32..8a23b37f33 100644 --- a/pkg/ruler/manager.go +++ b/pkg/ruler/manager.go @@ -270,7 +270,7 @@ func (r *DefaultMultiTenantManager) createRulesManager(user string, ctx context. 
} func defaultRuleGroupIterationFunc(ctx context.Context, g *promRules.Group, evalTimestamp time.Time) { - logMessage := []interface{}{ + logMessage := []any{ "component", "ruler", "rule_group", g.Name(), "namespace", g.File(), diff --git a/pkg/ruler/manager_test.go b/pkg/ruler/manager_test.go index 9af478b2b4..8abf3c2936 100644 --- a/pkg/ruler/manager_test.go +++ b/pkg/ruler/manager_test.go @@ -51,7 +51,7 @@ func TestSyncRuleGroups(t *testing.T) { mgr := getManager(m, user) require.NotNil(t, mgr) - test.Poll(t, 1*time.Second, true, func() interface{} { + test.Poll(t, 1*time.Second, true, func() any { return mgr.(*mockRulesManager).running.Load() }) @@ -72,7 +72,7 @@ func TestSyncRuleGroups(t *testing.T) { require.Nil(t, getManager(m, user)) // Make sure old manager was stopped. - test.Poll(t, 1*time.Second, false, func() interface{} { + test.Poll(t, 1*time.Second, false, func() any { return mgr.(*mockRulesManager).running.Load() }) @@ -94,7 +94,7 @@ func TestSyncRuleGroups(t *testing.T) { require.NotNil(t, newMgr) require.True(t, mgr != newMgr) - test.Poll(t, 1*time.Second, true, func() interface{} { + test.Poll(t, 1*time.Second, true, func() any { return newMgr.(*mockRulesManager).running.Load() }) @@ -107,7 +107,7 @@ func TestSyncRuleGroups(t *testing.T) { m.Stop() - test.Poll(t, 1*time.Second, false, func() interface{} { + test.Poll(t, 1*time.Second, false, func() any { return newMgr.(*mockRulesManager).running.Load() }) } @@ -167,7 +167,7 @@ func TestSlowRuleGroupSyncDoesNotSlowdownListRules(t *testing.T) { mgr := getManager(m, user) require.NotNil(t, mgr) - test.Poll(t, 1*time.Second, true, func() interface{} { + test.Poll(t, 1*time.Second, true, func() any { return mgr.(*mockRulesManager).running.Load() }) groups := m.GetRules(user) @@ -195,18 +195,18 @@ func TestSlowRuleGroupSyncDoesNotSlowdownListRules(t *testing.T) { groups = m.GetRules(user) require.Len(t, groups, len(groupsToReturn[0]), "expected %d but got %d", len(groupsToReturn[0]), len(groups)) - test.Poll(t, 5*time.Second, len(groupsToReturn[1]), func() interface{} { + test.Poll(t, 5*time.Second, len(groupsToReturn[1]), func() any { groups = m.GetRules(user) return len(groups) }) - test.Poll(t, 1*time.Second, true, func() interface{} { + test.Poll(t, 1*time.Second, true, func() any { return mgr.(*mockRulesManager).running.Load() }) m.Stop() - test.Poll(t, 1*time.Second, false, func() interface{} { + test.Poll(t, 1*time.Second, false, func() any { return mgr.(*mockRulesManager).running.Load() }) } diff --git a/pkg/ruler/mapper_test.go b/pkg/ruler/mapper_test.go index 10cb52aa28..d168895542 100644 --- a/pkg/ruler/mapper_test.go +++ b/pkg/ruler/mapper_test.go @@ -3,6 +3,7 @@ package ruler import ( "net/url" "os" + "slices" "testing" "github.com/go-kit/log" @@ -296,8 +297,8 @@ func Test_mapper_MapRulesMultipleFiles(t *testing.T) { updated, files, err := m.MapRules(testUser, twoFilesRuleSet) require.True(t, updated) require.Len(t, files, 2) - require.True(t, sliceContains(t, fileOnePath, files)) - require.True(t, sliceContains(t, fileTwoPath, files)) + require.True(t, slices.Contains(files, fileOnePath)) + require.True(t, slices.Contains(files, fileTwoPath)) require.NoError(t, err) exists, err := afero.Exists(m.FS, fileOnePath) @@ -312,8 +313,8 @@ func Test_mapper_MapRulesMultipleFiles(t *testing.T) { updated, files, err := m.MapRules(testUser, twoFilesUpdatedRuleSet) require.True(t, updated) require.Len(t, files, 2) - require.True(t, sliceContains(t, fileOnePath, files)) - require.True(t, sliceContains(t, fileTwoPath, files)) 
+ require.True(t, slices.Contains(files, fileOnePath)) + require.True(t, slices.Contains(files, fileTwoPath)) require.NoError(t, err) exists, err := afero.Exists(m.FS, fileOnePath) @@ -375,18 +376,6 @@ func Test_mapper_MapRulesSpecialCharNamespace(t *testing.T) { }) } -func sliceContains(t *testing.T, find string, in []string) bool { - t.Helper() - - for _, s := range in { - if find == s { - return true - } - } - - return false -} - func TestYamlFormatting(t *testing.T) { l := log.NewLogfmtLogger(os.Stdout) l = level.NewFilter(l, level.AllowInfo()) diff --git a/pkg/ruler/notifier_test.go b/pkg/ruler/notifier_test.go index 8d3c6ba2af..e27e3527ed 100644 --- a/pkg/ruler/notifier_test.go +++ b/pkg/ruler/notifier_test.go @@ -225,9 +225,7 @@ func TestBuildNotifierConfig(t *testing.T) { name: "with external labels", cfg: &Config{ AlertmanagerURL: "http://alertmanager.default.svc.cluster.local/alertmanager", - ExternalLabels: []labels.Label{ - {Name: "region", Value: "us-east-1"}, - }, + ExternalLabels: labels.FromStrings("region", "us-east-1"), }, ncfg: &config.Config{ AlertingConfig: config.AlertingConfig{ @@ -247,9 +245,7 @@ func TestBuildNotifierConfig(t *testing.T) { }, }, GlobalConfig: config.GlobalConfig{ - ExternalLabels: []labels.Label{ - {Name: "region", Value: "us-east-1"}, - }, + ExternalLabels: labels.FromStrings("region", "us-east-1"), }, }, }, diff --git a/pkg/ruler/ruler.go b/pkg/ruler/ruler.go index 70c07233f4..9b03acae15 100644 --- a/pkg/ruler/ruler.go +++ b/pkg/ruler/ruler.go @@ -8,6 +8,7 @@ import ( "net/http" "net/url" "path/filepath" + "slices" "sort" "strings" "sync" @@ -180,7 +181,7 @@ type Config struct { // Validate config and returns error on failure func (cfg *Config) Validate(limits validation.Limits, log log.Logger) error { - if !util.StringsContain(supportedShardingStrategies, cfg.ShardingStrategy) { + if !slices.Contains(supportedShardingStrategies, cfg.ShardingStrategy) { return errInvalidShardingStrategy } @@ -200,7 +201,7 @@ func (cfg *Config) Validate(limits validation.Limits, log log.Logger) error { return errInvalidMaxConcurrentEvals } - if !util.StringsContain(supportedQueryResponseFormats, cfg.QueryResponseFormat) { + if !slices.Contains(supportedQueryResponseFormats, cfg.QueryResponseFormat) { return errInvalidQueryResponseFormat } @@ -621,7 +622,7 @@ func (r *Ruler) nonPrimaryInstanceOwnsRuleGroup(g *rulespb.RuleGroupDesc, replic ctx, cancel := context.WithTimeout(ctx, r.cfg.LivenessCheckTimeout) defer cancel() - err := concurrency.ForEach(ctx, jobs, len(jobs), func(ctx context.Context, job interface{}) error { + err := concurrency.ForEach(ctx, jobs, len(jobs), func(ctx context.Context, job any) error { addr := job.(string) rulerClient, err := r.GetClientFor(addr) if err != nil { @@ -693,13 +694,21 @@ func (r *Ruler) run(ctx context.Context) error { ringTickerChan = ringTicker.C } - r.syncRules(ctx, rulerSyncReasonInitial) + syncRuleErrMsg := func(syncRulesErr error) { + level.Error(r.logger).Log("msg", "failed to sync rules", "err", syncRulesErr) + } + + initialSyncErr := r.syncRules(ctx, rulerSyncReasonInitial) + if initialSyncErr != nil { + syncRuleErrMsg(initialSyncErr) + } for { + var syncRulesErr error select { case <-ctx.Done(): return nil case <-tick.C: - r.syncRules(ctx, rulerSyncReasonPeriodic) + syncRulesErr = r.syncRules(ctx, rulerSyncReasonPeriodic) case <-ringTickerChan: // We ignore the error because in case of error it will return an empty // replication set which we use to compare with the previous state. 
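Editor's note: several hunks in this change replace hand-rolled clamps with Go 1.21's built-in min and max (the min() in the next hunk, and the max() rewrites in ring.go and ruler_ring.go elsewhere in this diff). A minimal sketch of the equivalence, with illustrative variable names only:

	// before: require at least rf
	numRequired := len(instances)
	if numRequired < rf {
		numRequired = rf
	}
	// after: the Go 1.21 builtin, identical behavior
	numRequired = max(len(instances), rf)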
@@ -707,15 +716,18 @@ func (r *Ruler) run(ctx context.Context) error { if ring.HasReplicationSetChanged(ringLastState, currRingState) { ringLastState = currRingState - r.syncRules(ctx, rulerSyncReasonRingChange) + syncRulesErr = r.syncRules(ctx, rulerSyncReasonRingChange) } case err := <-r.subservicesWatcher.Chan(): return errors.Wrap(err, "ruler subservice failed") } + if syncRulesErr != nil { + syncRuleErrMsg(syncRulesErr) + } } } -func (r *Ruler) syncRules(ctx context.Context, reason string) { +func (r *Ruler) syncRules(ctx context.Context, reason string) error { level.Info(r.logger).Log("msg", "syncing rules", "reason", reason) r.rulerSync.WithLabelValues(reason).Inc() timer := prometheus.NewTimer(nil) @@ -727,12 +739,12 @@ func (r *Ruler) syncRules(ctx context.Context, reason string) { loadedConfigs, backupConfigs, err := r.loadRuleGroups(ctx) if err != nil { - return + return err } if ctx.Err() != nil { level.Info(r.logger).Log("msg", "context is canceled. not syncing rules") - return + return err } // This will also delete local group files for users that are no longer in 'configs' map. r.manager.SyncRuleGroups(ctx, loadedConfigs) @@ -740,6 +752,8 @@ func (r *Ruler) syncRules(ctx context.Context, reason string) { if r.cfg.RulesBackupEnabled() { r.manager.BackUpRuleGroups(ctx, backupConfigs) } + + return nil } func (r *Ruler) loadRuleGroups(ctx context.Context) (map[string]rulespb.RuleGroupList, map[string]rulespb.RuleGroupList, error) { @@ -897,13 +911,10 @@ func (r *Ruler) listRulesShuffleSharding(ctx context.Context) (map[string]rulesp gLock := sync.Mutex{} ruleGroupCounts := make(map[string]int, len(userRings)) - concurrency := loadRulesConcurrency - if len(userRings) < concurrency { - concurrency = len(userRings) - } + concurrency := min(len(userRings), loadRulesConcurrency) g, gctx := errgroup.WithContext(ctx) - for i := 0; i < concurrency; i++ { + for range concurrency { g.Go(func() error { for userID := range userCh { groups, err := r.store.ListRuleGroupsForUserAndNamespace(gctx, userID, "") @@ -1374,7 +1385,7 @@ func (r *Ruler) getShardedRules(ctx context.Context, userID string, rulesRequest } // Concurrently fetch rules from all rulers. jobs := concurrency.CreateJobsFromStrings(rulers.GetAddresses()) - err = concurrency.ForEach(ctx, jobs, len(jobs), func(ctx context.Context, job interface{}) error { + err = concurrency.ForEach(ctx, jobs, len(jobs), func(ctx context.Context, job any) error { addr := job.(string) rulerClient, err := r.clientsPool.GetClientFor(addr) @@ -1520,7 +1531,7 @@ func (r *Ruler) ListAllRules(w http.ResponseWriter, req *http.Request) { } done := make(chan struct{}) - iter := make(chan interface{}) + iter := make(chan any) go func() { util.StreamWriteYAMLV3Response(w, iter, logger) diff --git a/pkg/ruler/ruler_ring.go b/pkg/ruler/ruler_ring.go index 215a711f02..2524987038 100644 --- a/pkg/ruler/ruler_ring.go +++ b/pkg/ruler/ruler_ring.go @@ -38,12 +38,13 @@ var ListRuleRingOp = ring.NewOp([]ring.InstanceState{ring.ACTIVE, ring.LEAVING}, // is used to strip down the config to the minimum, and avoid confusion // to the user. 
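// Editor's note on the field added below: DetailedMetricsEnabled defaults to
// true, preserving current behavior; per the flag help text, setting
// -ruler.ring.detailed-metrics-enabled=false (detailed_metrics_enabled in
// YAML) drops the per-tenant token count and ownership metrics to cut the
// number of metrics emitted.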
type RingConfig struct { - KVStore kv.Config `yaml:"kvstore"` - HeartbeatPeriod time.Duration `yaml:"heartbeat_period"` - HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"` - ReplicationFactor int `yaml:"replication_factor"` - ZoneAwarenessEnabled bool `yaml:"zone_awareness_enabled"` - TokensFilePath string `yaml:"tokens_file_path"` + KVStore kv.Config `yaml:"kvstore"` + HeartbeatPeriod time.Duration `yaml:"heartbeat_period"` + HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"` + ReplicationFactor int `yaml:"replication_factor"` + ZoneAwarenessEnabled bool `yaml:"zone_awareness_enabled"` + TokensFilePath string `yaml:"tokens_file_path"` + DetailedMetricsEnabled bool `yaml:"detailed_metrics_enabled"` // Instance details InstanceID string `yaml:"instance_id" doc:"hidden"` @@ -77,6 +78,7 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.ReplicationFactor, "ruler.ring.replication-factor", 1, "EXPERIMENTAL: The replication factor to use when loading rule groups for API HA.") f.BoolVar(&cfg.ZoneAwarenessEnabled, "ruler.ring.zone-awareness-enabled", false, "EXPERIMENTAL: True to enable zone-awareness and load rule groups across different availability zones for API HA.") f.StringVar(&cfg.TokensFilePath, "ruler.ring.tokens-file-path", "", "EXPERIMENTAL: File path where tokens are stored. If empty, tokens are not stored at shutdown and restored at startup.") + f.BoolVar(&cfg.DetailedMetricsEnabled, "ruler.ring.detailed-metrics-enabled", true, "Set to true to enable ring detailed metrics. These metrics provide detailed information, such as token count and ownership per tenant. Disabling them can significantly decrease the number of metrics emitted.") // Instance flags cfg.InstanceInterfaceNames = []string{"eth0", "en0"} @@ -119,6 +121,7 @@ func (cfg *RingConfig) ToRingConfig() ring.Config { rc.HeartbeatTimeout = cfg.HeartbeatTimeout rc.SubringCacheDisabled = true rc.ZoneAwarenessEnabled = cfg.ZoneAwarenessEnabled + rc.DetailedMetricsEnabled = cfg.DetailedMetricsEnabled // Each rule group is evaluated by *exactly* one ruler, but it can be loaded by multiple rulers for API HA rc.ReplicationFactor = cfg.ReplicationFactor @@ -160,10 +163,7 @@ func GetReplicationSetForListRule(r ring.ReadRing, cfg *RingConfig) (ring.Replic return ring.ReplicationSet{}, zoneFailures, ring.ErrTooManyUnhealthyInstances } } else { - numRequired := len(healthy) + len(unhealthy) - if numRequired < r.ReplicationFactor() { - numRequired = r.ReplicationFactor() - } + numRequired := max(len(healthy)+len(unhealthy), r.ReplicationFactor()) // quorum is not required so 1 replica is enough to handle the request numRequired -= r.ReplicationFactor() - 1 if len(healthy) < numRequired { diff --git a/pkg/ruler/ruler_ring_test.go b/pkg/ruler/ruler_ring_test.go index 7dd3cca9a9..3b388e456b 100644 --- a/pkg/ruler/ruler_ring_test.go +++ b/pkg/ruler/ruler_ring_test.go @@ -262,7 +262,7 @@ func TestGetReplicationSetForListRule(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), rulerRing)) t.Cleanup(rulerRing.StopAsync) - err := kvStore.CAS(context.Background(), ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err := kvStore.CAS(context.Background(), ringKey, func(in any) (out any, retry bool, err error) { d, _ := in.(*ring.Desc) if d == nil { d = ring.NewDesc() diff --git a/pkg/ruler/ruler_test.go b/pkg/ruler/ruler_test.go index 538d7a0ac2..755eb49fa3 100644 --- a/pkg/ruler/ruler_test.go +++ b/pkg/ruler/ruler_test.go @@ -6,13 +6,14 @@ import ( 
"errors" "fmt" "io" + "maps" "math/rand" "net/http" "net/http/httptest" "net/url" "os" "reflect" - "sort" + "slices" "strings" "sync" "testing" @@ -414,7 +415,7 @@ func TestNotifierSendsUserIDHeader(t *testing.T) { time.Sleep(10 * time.Millisecond) } n.Send(¬ifier.Alert{ - Labels: labels.Labels{labels.Label{Name: "alertname", Value: "testalert"}}, + Labels: labels.FromStrings("alertname", "testalert"), }) wg.Wait() @@ -450,7 +451,7 @@ func TestNotifierSendExternalLabels(t *testing.T) { cfg := defaultRulerConfig(t) cfg.AlertmanagerURL = ts.URL cfg.AlertmanagerDiscovery = false - cfg.ExternalLabels = []labels.Label{{Name: "region", Value: "us-east-1"}} + cfg.ExternalLabels = labels.FromStrings("region", "us-east-1") limits := &ruleLimits{} engine, queryable, pusher, logger, _, reg := testSetup(t, nil) metrics := NewRuleEvalMetrics(cfg, nil) @@ -481,20 +482,19 @@ func TestNotifierSendExternalLabels(t *testing.T) { }, { name: "local labels without overriding", - userExternalLabels: labels.FromStrings("mylabel", "local"), + userExternalLabels: []labels.Label{{Name: "mylabel", Value: "local"}}, expectedExternalLabels: []labels.Label{{Name: "region", Value: "us-east-1"}, {Name: "mylabel", Value: "local"}}, }, { name: "local labels that override globals", - userExternalLabels: labels.FromStrings("region", "cloud", "mylabel", "local"), + userExternalLabels: []labels.Label{{Name: "region", Value: "cloud"}, {Name: "mylabel", Value: "local"}}, expectedExternalLabels: []labels.Label{{Name: "region", Value: "cloud"}, {Name: "mylabel", Value: "local"}}, }, } for _, test := range tests { - test := test t.Run(test.name, func(t *testing.T) { - limits.setRulerExternalLabels(test.userExternalLabels) + limits.setRulerExternalLabels(labels.New(test.userExternalLabels...)) manager.SyncRuleGroups(context.Background(), map[string]rulespb.RuleGroupList{ userID: {&rulespb.RuleGroupDesc{Name: "group", Namespace: "ns", Interval: time.Minute, User: userID}}, }) @@ -506,7 +506,7 @@ func TestNotifierSendExternalLabels(t *testing.T) { }, 10*time.Second, 10*time.Millisecond) n.notifier.Send(¬ifier.Alert{ - Labels: labels.Labels{labels.Label{Name: "alertname", Value: "testalert"}}, + Labels: labels.FromStrings("alertname", "testalert"), }) select { case <-time.After(5 * time.Second): @@ -1319,7 +1319,7 @@ func TestGetRules(t *testing.T) { } if tc.sharding { - err := kvStore.CAS(context.Background(), ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err := kvStore.CAS(context.Background(), ringKey, func(in any) (out any, retry bool, err error) { d, _ := in.(*ring.Desc) if d == nil { d = ring.NewDesc() @@ -1342,12 +1342,13 @@ func TestGetRules(t *testing.T) { // Sync Rules forEachRuler(func(_ string, r *Ruler) { - r.syncRules(context.Background(), rulerSyncReasonInitial) + err := r.syncRules(context.Background(), rulerSyncReasonInitial) + require.NoError(t, err) }) if tc.sharding { // update the State of the rulers in the ring based on tc.rulerStateMap - err := kvStore.CAS(context.Background(), ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err := kvStore.CAS(context.Background(), ringKey, func(in any) (out any, retry bool, err error) { d, _ := in.(*ring.Desc) if d == nil { d = ring.NewDesc() @@ -1550,7 +1551,7 @@ func TestGetRulesFromBackup(t *testing.T) { } } - err := kvStore.CAS(context.Background(), ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err := kvStore.CAS(context.Background(), ringKey, func(in any) (out any, retry bool, err error) { 
d, _ := in.(*ring.Desc) if d == nil { d = ring.NewDesc() @@ -1572,11 +1573,12 @@ func TestGetRulesFromBackup(t *testing.T) { // Sync Rules forEachRuler(func(_ string, r *Ruler) { - r.syncRules(context.Background(), rulerSyncReasonInitial) + err := r.syncRules(context.Background(), rulerSyncReasonInitial) + require.NoError(t, err) }) // update the State of the rulers in the ring based on tc.rulerStateMap - err = kvStore.CAS(context.Background(), ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err = kvStore.CAS(context.Background(), ringKey, func(in any) (out any, retry bool, err error) { d, _ := in.(*ring.Desc) if d == nil { d = ring.NewDesc() @@ -1766,7 +1768,7 @@ func getRulesHATest(replicationFactor int) func(t *testing.T) { } } - err := kvStore.CAS(context.Background(), ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err := kvStore.CAS(context.Background(), ringKey, func(in any) (out any, retry bool, err error) { d, _ := in.(*ring.Desc) if d == nil { d = ring.NewDesc() @@ -1788,11 +1790,12 @@ func getRulesHATest(replicationFactor int) func(t *testing.T) { // Sync Rules forEachRuler(func(_ string, r *Ruler) { - r.syncRules(context.Background(), rulerSyncReasonInitial) + err := r.syncRules(context.Background(), rulerSyncReasonInitial) + require.NoError(t, err) }) // update the State of the rulers in the ring based on tc.rulerStateMap - err = kvStore.CAS(context.Background(), ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err = kvStore.CAS(context.Background(), ringKey, func(in any) (out any, retry bool, err error) { d, _ := in.(*ring.Desc) if d == nil { d = ring.NewDesc() @@ -1811,8 +1814,10 @@ func getRulesHATest(replicationFactor int) func(t *testing.T) { t.Errorf("ruler %s was not terminated with error %s", "ruler1", err.Error()) } - rulerAddrMap["ruler2"].syncRules(context.Background(), rulerSyncReasonPeriodic) - rulerAddrMap["ruler3"].syncRules(context.Background(), rulerSyncReasonPeriodic) + err = rulerAddrMap["ruler2"].syncRules(context.Background(), rulerSyncReasonPeriodic) + require.NoError(t, err) + err = rulerAddrMap["ruler3"].syncRules(context.Background(), rulerSyncReasonPeriodic) + require.NoError(t, err) requireGroupStateEqual := func(a *GroupStateDesc, b *GroupStateDesc) { require.Equal(t, a.Group.Interval, b.Group.Interval) @@ -2371,7 +2376,7 @@ func TestSharding(t *testing.T) { } if tc.setupRing != nil { - err := kvStore.CAS(context.Background(), ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err := kvStore.CAS(context.Background(), ringKey, func(in any) (out any, retry bool, err error) { d, _ := in.(*ring.Desc) if d == nil { d = ring.NewDesc() @@ -2439,9 +2444,7 @@ func userToken(user string, skip int) uint32 { } func sortTokens(tokens []uint32) []uint32 { - sort.Slice(tokens, func(i, j int) bool { - return tokens[i] < tokens[j] - }) + slices.Sort(tokens) return tokens } @@ -2500,7 +2503,7 @@ func Test_LoadPartialGroups(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), r1)) t.Cleanup(r1.StopAsync) - err := kvStore.CAS(context.Background(), ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err := kvStore.CAS(context.Background(), ringKey, func(in any) (out any, retry bool, err error) { d, _ := in.(*ring.Desc) if d == nil { d = ring.NewDesc() @@ -2511,7 +2514,7 @@ func Test_LoadPartialGroups(t *testing.T) { require.NoError(t, err) - test.Poll(t, time.Second*5, true, func() interface{} { + test.Poll(t, 
time.Second*5, true, func() any { return len(r1.manager.GetRules(user2)) > 0 && len(r1.manager.GetRules(user3)) > 0 }) @@ -2680,8 +2683,8 @@ func TestSendAlerts(t *testing.T) { { in: []*promRules.Alert{ { - Labels: []labels.Label{{Name: "l1", Value: "v1"}}, - Annotations: []labels.Label{{Name: "a2", Value: "v2"}}, + Labels: labels.FromStrings("l1", "v1"), + Annotations: labels.FromStrings("a2", "v2"), ActiveAt: time.Unix(1, 0), FiredAt: time.Unix(2, 0), ValidUntil: time.Unix(3, 0), @@ -2689,8 +2692,8 @@ func TestSendAlerts(t *testing.T) { }, exp: []*notifier.Alert{ { - Labels: []labels.Label{{Name: "l1", Value: "v1"}}, - Annotations: []labels.Label{{Name: "a2", Value: "v2"}}, + Labels: labels.FromStrings("l1", "v1"), + Annotations: labels.FromStrings("a2", "v2"), StartsAt: time.Unix(2, 0), EndsAt: time.Unix(3, 0), GeneratorURL: "http://localhost:9090/graph?g0.expr=up&g0.tab=1", @@ -2700,8 +2703,8 @@ func TestSendAlerts(t *testing.T) { { in: []*promRules.Alert{ { - Labels: []labels.Label{{Name: "l1", Value: "v1"}}, - Annotations: []labels.Label{{Name: "a2", Value: "v2"}}, + Labels: labels.FromStrings("l1", "v1"), + Annotations: labels.FromStrings("a2", "v2"), ActiveAt: time.Unix(1, 0), FiredAt: time.Unix(2, 0), ResolvedAt: time.Unix(4, 0), @@ -2709,8 +2712,8 @@ func TestSendAlerts(t *testing.T) { }, exp: []*notifier.Alert{ { - Labels: []labels.Label{{Name: "l1", Value: "v1"}}, - Annotations: []labels.Label{{Name: "a2", Value: "v2"}}, + Labels: labels.FromStrings("l1", "v1"), + Annotations: labels.FromStrings("a2", "v2"), StartsAt: time.Unix(2, 0), EndsAt: time.Unix(4, 0), GeneratorURL: "http://localhost:9090/graph?g0.expr=up&g0.tab=1", @@ -2723,7 +2726,6 @@ func TestSendAlerts(t *testing.T) { } for i, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { senderFunc := senderFunc(func(alerts ...*notifier.Alert) { if len(tc.in) == 0 { @@ -2800,7 +2802,8 @@ func TestRecoverAlertsPostOutage(t *testing.T) { evalFunc := func(ctx context.Context, g *promRules.Group, evalTimestamp time.Time) {} r, _ := buildRulerWithIterFunc(t, rulerCfg, &querier.TestConfig{Cfg: querierConfig, Distributor: d, Stores: queryables}, store, nil, evalFunc) - r.syncRules(context.Background(), rulerSyncReasonInitial) + err := r.syncRules(context.Background(), rulerSyncReasonInitial) + require.NoError(t, err) // assert initial state of rule group ruleGroup := r.manager.GetRules("user1")[0] @@ -3047,7 +3050,7 @@ func TestRulerDisablesRuleGroups(t *testing.T) { } if tc.setupRing != nil { - err := kvStore.CAS(context.Background(), ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err := kvStore.CAS(context.Background(), ringKey, func(in any) (out any, retry bool, err error) { d, _ := in.(*ring.Desc) if d == nil { d = ring.NewDesc() @@ -3081,9 +3084,7 @@ func TestRulerDisablesRuleGroups(t *testing.T) { if loaded == nil { loaded = map[string]rulespb.RuleGroupList{} } - for k, v := range loaded { - actualRules[k] = v - } + maps.Copy(actualRules, loaded) } } @@ -3236,7 +3237,7 @@ func TestGetShardSizeForUser(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), r.lifecycler)) } - err := kvStore.CAS(context.Background(), ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + err := kvStore.CAS(context.Background(), ringKey, func(in any) (out any, retry bool, err error) { d, _ := in.(*ring.Desc) if d == nil { d = ring.NewDesc() @@ -3265,7 +3266,8 @@ func TestGetShardSizeForUser(t *testing.T) { // Sync Rules 
forEachRuler(func(_ string, r *Ruler) { - r.syncRules(context.Background(), rulerSyncReasonInitial) + err := r.syncRules(context.Background(), rulerSyncReasonInitial) + require.NoError(t, err) }) result := testRuler.getShardSizeForUser(tc.userID) diff --git a/pkg/ruler/rulestore/bucketclient/bucket_client.go b/pkg/ruler/rulestore/bucketclient/bucket_client.go index 049524f500..8515ad910a 100644 --- a/pkg/ruler/rulestore/bucketclient/bucket_client.go +++ b/pkg/ruler/rulestore/bucketclient/bucket_client.go @@ -182,7 +182,7 @@ func (b *BucketRuleStore) LoadRuleGroups(ctx context.Context, groupsToLoad map[s // download all rule groups in parallel. We limit the number of workers to avoid a // particular user having too many rule groups rate limiting us with the object storage. g, gCtx := errgroup.WithContext(ctx) - for i := 0; i < loadConcurrency; i++ { + for range loadConcurrency { g.Go(func() error { for gr := range ch { user, namespace, group := gr.GetUser(), gr.GetNamespace(), gr.GetName() diff --git a/pkg/ruler/rulestore/bucketclient/bucket_client_test.go b/pkg/ruler/rulestore/bucketclient/bucket_client_test.go index 460a882118..0afa4b155c 100644 --- a/pkg/ruler/rulestore/bucketclient/bucket_client_test.go +++ b/pkg/ruler/rulestore/bucketclient/bucket_client_test.go @@ -28,7 +28,7 @@ type testGroup struct { } func TestListRules(t *testing.T) { - runForEachRuleStore(t, func(t *testing.T, rs rulestore.RuleStore, _ interface{}) { + runForEachRuleStore(t, func(t *testing.T, rs rulestore.RuleStore, _ any) { groups := []testGroup{ {user: "user1", namespace: "hello", ruleGroup: rulefmt.RuleGroup{Name: "first testGroup"}}, {user: "user1", namespace: "hello", ruleGroup: rulefmt.RuleGroup{Name: "second testGroup"}}, @@ -132,7 +132,7 @@ func TestLoadPartialRules(t *testing.T) { } func TestLoadRules(t *testing.T) { - runForEachRuleStore(t, func(t *testing.T, rs rulestore.RuleStore, _ interface{}) { + runForEachRuleStore(t, func(t *testing.T, rs rulestore.RuleStore, _ any) { groups := []testGroup{ {user: "user1", namespace: "hello", ruleGroup: rulefmt.RuleGroup{Name: "first testGroup", Interval: model.Duration(time.Minute), Rules: []rulefmt.Rule{{ For: model.Duration(5 * time.Minute), @@ -201,7 +201,7 @@ func TestLoadRules(t *testing.T) { } func TestDelete(t *testing.T) { - runForEachRuleStore(t, func(t *testing.T, rs rulestore.RuleStore, bucketClient interface{}) { + runForEachRuleStore(t, func(t *testing.T, rs rulestore.RuleStore, bucketClient any) { groups := []testGroup{ {user: "user1", namespace: "A", ruleGroup: rulefmt.RuleGroup{Name: "1"}}, {user: "user1", namespace: "A", ruleGroup: rulefmt.RuleGroup{Name: "2"}}, @@ -261,13 +261,13 @@ func TestDelete(t *testing.T) { }) } -func runForEachRuleStore(t *testing.T, testFn func(t *testing.T, store rulestore.RuleStore, bucketClient interface{})) { +func runForEachRuleStore(t *testing.T, testFn func(t *testing.T, store rulestore.RuleStore, bucketClient any)) { bucketClient := objstore.NewInMemBucket() bucketStore := NewBucketRuleStore(bucketClient, nil, log.NewNopLogger()) stores := map[string]struct { store rulestore.RuleStore - client interface{} + client any }{ "bucket": {store: bucketStore, client: bucketClient}, } @@ -279,7 +279,7 @@ func runForEachRuleStore(t *testing.T, testFn func(t *testing.T, store rulestore } } -func getSortedObjectKeys(bucketClient interface{}) []string { +func getSortedObjectKeys(bucketClient any) []string { if typed, ok := bucketClient.(*objstore.InMemBucket); ok { var keys []string for key := range typed.Objects() { diff 
--git a/pkg/scheduler/fragment_table/fragment_table.go b/pkg/scheduler/fragment_table/fragment_table.go
new file mode 100644
index 0000000000..3cf161070b
--- /dev/null
+++ b/pkg/scheduler/fragment_table/fragment_table.go
@@ -0,0 +1,79 @@
+package fragment_table
+
+import (
+	"sync"
+	"time"
+
+	"github.com/cortexproject/cortex/pkg/distributed_execution"
+)
+
+type fragmentEntry struct {
+	addr      string
+	createdAt time.Time
+}
+
+// FragmentTable maintains a mapping between query fragments and their assigned querier addresses.
+// Entries automatically expire after a configured duration to prevent stale mappings.
+type FragmentTable struct {
+	mappings   map[distributed_execution.FragmentKey]*fragmentEntry
+	mu         sync.RWMutex
+	expiration time.Duration
+}
+
+// NewFragmentTable creates a new FragmentTable with the specified expiration duration.
+// It starts a background goroutine that periodically removes expired entries.
+// The cleanup interval is set to half of the expiration duration.
+func NewFragmentTable(expiration time.Duration) *FragmentTable {
+	ft := &FragmentTable{
+		mappings:   make(map[distributed_execution.FragmentKey]*fragmentEntry),
+		expiration: expiration,
+	}
+	go ft.periodicCleanup()
+	return ft
+}
+
+// AddAddressByID associates a querier address with a specific fragment of a query.
+// The association will automatically expire after the configured duration.
+func (f *FragmentTable) AddAddressByID(queryID uint64, fragmentID uint64, addr string) {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	key := distributed_execution.MakeFragmentKey(queryID, fragmentID)
+	f.mappings[key] = &fragmentEntry{
+		addr:      addr,
+		createdAt: time.Now(),
+	}
+}
+
+// GetAddrByID retrieves the querier address associated with a specific fragment.
+func (f *FragmentTable) GetAddrByID(queryID uint64, fragmentID uint64) (string, bool) {
+	f.mu.RLock()
+	defer f.mu.RUnlock()
+	key := distributed_execution.MakeFragmentKey(queryID, fragmentID)
+	if entry, ok := f.mappings[key]; ok {
+		return entry.addr, true
+	}
+	return "", false
+}
+
+func (f *FragmentTable) cleanupExpired() {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	now := time.Now()
+	keysToDelete := make([]distributed_execution.FragmentKey, 0)
+	for key, entry := range f.mappings {
+		if now.Sub(entry.createdAt) > f.expiration {
+			keysToDelete = append(keysToDelete, key)
+		}
+	}
+	for _, key := range keysToDelete {
+		delete(f.mappings, key)
+	}
+}
+
+func (f *FragmentTable) periodicCleanup() {
+	ticker := time.NewTicker(f.expiration / 2)
+	defer ticker.Stop()
+	for range ticker.C {
+		f.cleanupExpired()
+	}
+}
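Note: FragmentTable is essentially a TTL map. Every write stamps createdAt, reads take the read lock, and a ticker firing at half the expiration sweeps stale entries, so a mapping can outlive its TTL by at most roughly half the expiration again; the sweep goroutine has no stop hook, which matches how NewScheduler later creates a single table (with a 2-minute TTL) for the process lifetime. Below is a minimal, self-contained sketch of that pattern; all names here are illustrative, not part of the Cortex API (the real code collects keys before deleting, though deleting during range is equally safe in Go).

package main

import (
	"fmt"
	"time"
)

// entry mirrors fragmentEntry: a value stamped with its insertion time.
type entry struct {
	val       string
	createdAt time.Time
}

// sweep removes entries older than ttl, like FragmentTable.cleanupExpired.
func sweep(m map[string]entry, ttl time.Duration) {
	now := time.Now()
	for k, e := range m {
		if now.Sub(e.createdAt) > ttl {
			delete(m, k)
		}
	}
}

func main() {
	m := map[string]entry{
		"query-7/fragment-1": {val: "querier-1:9095", createdAt: time.Now().Add(-3 * time.Minute)},
	}
	sweep(m, 2*time.Minute) // 2m matches the TTL hard-coded in NewScheduler
	fmt.Println(len(m))     // 0: the stale mapping was swept
}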
diff --git a/pkg/scheduler/fragment_table/fragment_table_test.go b/pkg/scheduler/fragment_table/fragment_table_test.go
new file mode 100644
index 0000000000..576e272645
--- /dev/null
+++ b/pkg/scheduler/fragment_table/fragment_table_test.go
@@ -0,0 +1,165 @@
+package fragment_table
+
+import (
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestNewFragmentTable(t *testing.T) {
+	tests := []struct {
+		name       string
+		expiration time.Duration
+	}{
+		{
+			name:       "with positive expiration",
+			expiration: time.Hour,
+		},
+		{
+			name:       "with small expiration",
+			expiration: time.Millisecond,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			ft := NewFragmentTable(tt.expiration)
+			require.NotNil(t, ft)
+			require.NotNil(t, ft.mappings)
+			assert.Equal(t, tt.expiration, ft.expiration)
+		})
+	}
+}
+
+func TestFragmentTable_AddAndGetAddress(t *testing.T) {
+	ft := NewFragmentTable(time.Hour)
+
+	tests := []struct {
+		name      string
+		queryID   uint64
+		fragID    uint64
+		addr      string
+		wantFound bool
+	}{
+		{
+			name:      "add new address",
+			queryID:   1,
+			fragID:    1,
+			addr:      "addr1",
+			wantFound: true,
+		},
+		{
+			name:      "get non-existent address",
+			queryID:   1,
+			fragID:    2,
+			wantFound: false,
+		},
+		{
+			name:      "overwrite existing address",
+			queryID:   1,
+			fragID:    1,
+			addr:      "addr2",
+			wantFound: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if tt.addr != "" {
+				ft.AddAddressByID(tt.queryID, tt.fragID, tt.addr)
+			}
+
+			gotAddr, gotFound := ft.GetAddrByID(tt.queryID, tt.fragID)
+			assert.Equal(t, tt.wantFound, gotFound)
+			if tt.wantFound {
+				assert.Equal(t, tt.addr, gotAddr)
+			}
+		})
+	}
+}
+
+func TestFragmentTable_Expiration(t *testing.T) {
+	expiration := 100 * time.Millisecond
+	ft := NewFragmentTable(expiration)
+
+	t.Run("entries expire after timeout", func(t *testing.T) {
+		ft.AddAddressByID(1, 1, "addr1")
+
+		// verify it exists
+		addr, exists := ft.GetAddrByID(1, 1)
+		require.True(t, exists)
+		assert.Equal(t, "addr1", addr)
+
+		// wait for expiration
+		time.Sleep(expiration * 2)
+
+		ft.cleanupExpired()
+
+		// verify it's gone
+		_, exists = ft.GetAddrByID(1, 1)
+		assert.False(t, exists)
+	})
+}
+
+func TestFragmentTable_ConcurrentAccess(t *testing.T) {
+	ft := NewFragmentTable(time.Hour)
+
+	const (
+		numGoroutines = 10
+		numOperations = 100
+	)
+
+	var wg sync.WaitGroup
+	wg.Add(numGoroutines * 2)
+
+	// start writers
+	for i := range numGoroutines {
+		go func(id int) {
+			defer wg.Done()
+			for j := range numOperations {
+				ft.AddAddressByID(uint64(id), uint64(j), "addr")
+			}
+		}(i)
+	}
+
+	// start readers; the results are irrelevant, the reads exist to
+	// exercise the RWMutex concurrently with the writers above
+	for i := range numGoroutines {
+		go func(id int) {
+			defer wg.Done()
+			for j := range numOperations {
+				ft.GetAddrByID(uint64(id), uint64(j))
+			}
+		}(i)
+	}
+
+	wg.Wait() // wait for all goroutines to finish
+}
+
+func TestFragmentTable_PeriodicCleanup(t *testing.T) {
+	expiration := 100 * time.Millisecond
+	ft := NewFragmentTable(expiration)
+
+	ft.AddAddressByID(1, 1, "addr1")
+	ft.AddAddressByID(1, 2, "addr2")
+
+	// verify entries exist
+	addr, ok := ft.GetAddrByID(1, 1)
+	require.True(t, ok)
+	require.Equal(t, "addr1", addr)
+
+	addr, ok = ft.GetAddrByID(1, 2)
+	require.True(t, ok)
+	require.Equal(t, "addr2", addr)
+
+	// wait for automatic cleanup
+	time.Sleep(expiration * 3)
+
+	// verify entries were cleaned up
+	_, ok = ft.GetAddrByID(1, 1)
+	require.False(t, ok)
+
+	_, ok = ft.GetAddrByID(1, 2)
+	require.False(t, ok)
+}
diff --git a/pkg/scheduler/queue/queue_test.go b/pkg/scheduler/queue/queue_test.go
index 98a1ea0582..36aa97c98a 100644
--- a/pkg/scheduler/queue/queue_test.go
+++ b/pkg/scheduler/queue/queue_test.go
@@ -23,7 +23,7 @@ func BenchmarkGetNextRequest(b *testing.B) {
 	queues := make([]*RequestQueue, 0, b.N)
 
-	for n := 0; n < b.N; n++ {
+	for b.Loop() {
 		queue := NewRequestQueue(maxOutstandingPerTenant,
 			prometheus.NewGaugeVec(prometheus.GaugeOpts{}, []string{"user", "priority", "type"}),
 			prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"user", "priority"}),
@@ -32,12 +32,12 @@
 		)
 		queues = append(queues, queue)
 
-		for ix := 0; ix < queriers; ix++ {
+		for ix := range queriers {
 			queue.RegisterQuerierConnection(fmt.Sprintf("querier-%d", ix))
 		}
 
-		for i := 0; i < maxOutstandingPerTenant; i++ {
-			for j := 0; j < numTenants; j++ {
+		for range maxOutstandingPerTenant {
+			for j := range numTenants {
 				userID := strconv.Itoa(j)
 
 				err := queue.EnqueueRequest(userID, MockRequest{}, 0, nil)
@@ -49,11 +49,10
@@ func BenchmarkGetNextRequest(b *testing.B) { } ctx := context.Background() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { idx := FirstUser() - for j := 0; j < maxOutstandingPerTenant*numTenants; j++ { + for range maxOutstandingPerTenant * numTenants { querier := "" b: // Find querier with at least one request to avoid blocking in getNextRequestForQuerier. @@ -82,7 +81,7 @@ func BenchmarkQueueRequest(b *testing.B) { users := make([]string, 0, numTenants) requests := make([]MockRequest, 0, numTenants) - for n := 0; n < b.N; n++ { + for n := 0; b.Loop(); n++ { q := NewRequestQueue(maxOutstandingPerTenant, prometheus.NewGaugeVec(prometheus.GaugeOpts{}, []string{"user", "priority", "type"}), prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"user", "priority"}), @@ -90,22 +89,21 @@ func BenchmarkQueueRequest(b *testing.B) { nil, ) - for ix := 0; ix < queriers; ix++ { + for ix := range queriers { q.RegisterQuerierConnection(fmt.Sprintf("querier-%d", ix)) } queues = append(queues, q) - for j := 0; j < numTenants; j++ { + for j := range numTenants { requests = append(requests, MockRequest{id: fmt.Sprintf("%d-%d", n, j)}) users = append(users, strconv.Itoa(j)) } } - b.ResetTimer() - for n := 0; n < b.N; n++ { - for i := 0; i < maxOutstandingPerTenant; i++ { - for j := 0; j < numTenants; j++ { + for n := 0; b.Loop(); n++ { + for range maxOutstandingPerTenant { + for j := range numTenants { err := queues[n].EnqueueRequest(users[j], requests[j], 0, nil) if err != nil { b.Fatal(err) @@ -122,7 +120,7 @@ func BenchmarkGetNextRequestPriorityQueue(b *testing.B) { queues := make([]*RequestQueue, 0, b.N) - for n := 0; n < b.N; n++ { + for b.Loop() { queue := NewRequestQueue(maxOutstandingPerTenant, prometheus.NewGaugeVec(prometheus.GaugeOpts{}, []string{"user", "priority", "type"}), prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"user", "priority"}), @@ -131,12 +129,12 @@ func BenchmarkGetNextRequestPriorityQueue(b *testing.B) { ) queues = append(queues, queue) - for ix := 0; ix < queriers; ix++ { + for ix := range queriers { queue.RegisterQuerierConnection(fmt.Sprintf("querier-%d", ix)) } - for i := 0; i < maxOutstandingPerTenant; i++ { - for j := 0; j < numTenants; j++ { + for i := range maxOutstandingPerTenant { + for j := range numTenants { userID := strconv.Itoa(j) err := queue.EnqueueRequest(userID, MockRequest{priority: int64(i)}, 0, nil) @@ -148,11 +146,10 @@ func BenchmarkGetNextRequestPriorityQueue(b *testing.B) { } ctx := context.Background() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { idx := FirstUser() - for j := 0; j < maxOutstandingPerTenant*numTenants; j++ { + for range maxOutstandingPerTenant * numTenants { querier := "" b: // Find querier with at least one request to avoid blocking in getNextRequestForQuerier. 
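Note: the benchmark hunks above all follow one migration: the classic `for i := 0; i < b.N; i++` loops become Go 1.24's `for b.Loop()`, which makes the explicit b.ResetTimer calls unnecessary because setup done before the loop is excluded from timing automatically. A sketch of the before/after shape (package and helper names are placeholders, not code from this repo):

package example

import "testing"

func BenchmarkOldStyle(b *testing.B) {
	data := make([]int, 1024) // setup is timed unless ResetTimer is called
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		work(data)
	}
}

func BenchmarkNewStyle(b *testing.B) {
	data := make([]int, 1024) // setup before the loop is excluded automatically
	for b.Loop() {
		work(data)
	}
}

func work(data []int) {
	sum := 0
	for _, v := range data {
		sum += v
	}
	_ = sum
}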
@@ -181,7 +178,7 @@ func BenchmarkQueueRequestPriorityQueue(b *testing.B) { users := make([]string, 0, numTenants) requests := make([]MockRequest, 0, numTenants) - for n := 0; n < b.N; n++ { + for n := 0; b.Loop(); n++ { q := NewRequestQueue(maxOutstandingPerTenant, prometheus.NewGaugeVec(prometheus.GaugeOpts{}, []string{"user", "priority", "type"}), prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"user", "priority"}), @@ -189,22 +186,21 @@ func BenchmarkQueueRequestPriorityQueue(b *testing.B) { nil, ) - for ix := 0; ix < queriers; ix++ { + for ix := range queriers { q.RegisterQuerierConnection(fmt.Sprintf("querier-%d", ix)) } queues = append(queues, q) - for j := 0; j < numTenants; j++ { + for j := range numTenants { requests = append(requests, MockRequest{id: fmt.Sprintf("%d-%d", n, j), priority: int64(j)}) users = append(users, strconv.Itoa(j)) } } - b.ResetTimer() - for n := 0; n < b.N; n++ { - for i := 0; i < maxOutstandingPerTenant; i++ { - for j := 0; j < numTenants; j++ { + for n := 0; b.Loop(); n++ { + for range maxOutstandingPerTenant { + for j := range numTenants { err := queues[n].EnqueueRequest(users[j], requests[j], 0, nil) if err != nil { b.Fatal(err) diff --git a/pkg/scheduler/queue/user_queues.go b/pkg/scheduler/queue/user_queues.go index ee1ce9e804..6a9c24b2c2 100644 --- a/pkg/scheduler/queue/user_queues.go +++ b/pkg/scheduler/queue/user_queues.go @@ -374,7 +374,7 @@ func shuffleQueriersForUser(userSeed int64, queriersToSelect int, allSortedQueri scratchpad = append(scratchpad, allSortedQueriers...) last := len(scratchpad) - 1 - for i := 0; i < queriersToSelect; i++ { + for range queriersToSelect { r := rnd.Intn(last + 1) queriers[scratchpad[r]] = struct{}{} scratchpad[r], scratchpad[last] = scratchpad[last], scratchpad[r] @@ -393,7 +393,7 @@ func getPriorityList(queryPriority validation.QueryPriority, totalQuerierCount i for _, priority := range queryPriority.Priorities { reservedQuerierShardSize := util.DynamicShardSize(priority.ReservedQueriers, totalQuerierCount) - for i := 0; i < reservedQuerierShardSize; i++ { + for range reservedQuerierShardSize { priorityList = append(priorityList, priority.Priority) } } diff --git a/pkg/scheduler/queue/user_queues_test.go b/pkg/scheduler/queue/user_queues_test.go index 0c242eafa7..f70287d51a 100644 --- a/pkg/scheduler/queue/user_queues_test.go +++ b/pkg/scheduler/queue/user_queues_test.go @@ -80,7 +80,7 @@ func TestQueuesWithQueriers(t *testing.T) { maxQueriersPerUser := 5 // Add some queriers. - for ix := 0; ix < queriers; ix++ { + for ix := range queriers { qid := fmt.Sprintf("querier-%d", ix) uq.addQuerierConnection(qid) @@ -93,7 +93,7 @@ func TestQueuesWithQueriers(t *testing.T) { assert.NoError(t, isConsistent(uq)) // Add user queues. - for u := 0; u < users; u++ { + for u := range users { uid := fmt.Sprintf("user-%d", u) getOrAdd(t, uq, uid, maxQueriersPerUser) @@ -106,7 +106,7 @@ func TestQueuesWithQueriers(t *testing.T) { // and compute mean and stdDev. queriersMap := make(map[string]int) - for q := 0; q < queriers; q++ { + for q := range queriers { qid := fmt.Sprintf("querier-%d", q) lastUserIndex := -1 @@ -158,7 +158,7 @@ func TestQueuesConsistency(t *testing.T) { conns := map[string]int{} - for i := 0; i < 10000; i++ { + for i := range 10000 { switch r.Int() % 6 { case 0: assert.NotNil(t, uq.getOrAddQueue(generateTenant(r), 3)) @@ -208,7 +208,7 @@ func TestQueues_ForgetDelay(t *testing.T) { } // Add user queues. 
- for i := 0; i < numUsers; i++ { + for i := range numUsers { userID := fmt.Sprintf("user-%d", i) getOrAdd(t, uq, userID, maxQueriersPerUser) } @@ -300,7 +300,7 @@ func TestQueues_ForgetDelay_ShouldCorrectlyHandleQuerierReconnectingBeforeForget } // Add user queues. - for i := 0; i < numUsers; i++ { + for i := range numUsers { userID := fmt.Sprintf("user-%d", i) getOrAdd(t, uq, userID, maxQueriersPerUser) } @@ -446,7 +446,7 @@ func TestGetOrAddQueueShouldUpdateProperties(t *testing.T) { assert.IsType(t, &FIFORequestQueue{}, queue) // check the queriers and reservedQueriers map are consistent - for i := 0; i < 100; i++ { + for range 100 { queriers := q.userQueues["userID"].queriers reservedQueriers := q.userQueues["userID"].reservedQueriers q.userQueues["userID"].maxQueriers = 0 // reset to trigger querier assignment @@ -473,7 +473,7 @@ func TestQueueConcurrency(t *testing.T) { var wg sync.WaitGroup wg.Add(numGoRoutines) - for i := 0; i < numGoRoutines; i++ { + for i := range numGoRoutines { go func(cnt int) { defer wg.Done() queue := q.getOrAddQueue("userID", 2) @@ -577,7 +577,7 @@ func getUsersByQuerier(queues *queues, querierID string) []string { return userIDs } -func getKeys(x interface{}) []string { +func getKeys(x any) []string { var keys []string switch i := x.(type) { @@ -620,14 +620,14 @@ func TestShuffleQueriersCorrectness(t *testing.T) { const queriersCount = 100 var allSortedQueriers []string - for i := 0; i < queriersCount; i++ { + for i := range queriersCount { allSortedQueriers = append(allSortedQueriers, fmt.Sprintf("%d", i)) } sort.Strings(allSortedQueriers) r := rand.New(rand.NewSource(time.Now().UnixNano())) const tests = 1000 - for i := 0; i < tests; i++ { + for range tests { toSelect := r.Intn(queriersCount) if toSelect == 0 { toSelect = 3 diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index 7c7ef4b7b3..8223884b26 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -3,8 +3,10 @@ package scheduler import ( "context" "flag" + "fmt" "io" "net/http" + "net/url" "sync" "time" @@ -15,14 +17,17 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/thanos-io/promql-engine/logicalplan" "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/middleware" "github.com/weaveworks/common/user" "google.golang.org/grpc" + "github.com/cortexproject/cortex/pkg/distributed_execution" + "github.com/cortexproject/cortex/pkg/distributed_execution/plan_fragments" "github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb" - //lint:ignore faillint scheduler needs to retrieve priority from the context - "github.com/cortexproject/cortex/pkg/querier/stats" + "github.com/cortexproject/cortex/pkg/querier/stats" //lint:ignore faillint scheduler needs to retrieve priority from the context + "github.com/cortexproject/cortex/pkg/scheduler/fragment_table" "github.com/cortexproject/cortex/pkg/scheduler/queue" "github.com/cortexproject/cortex/pkg/scheduler/schedulerpb" "github.com/cortexproject/cortex/pkg/tenant" @@ -55,7 +60,8 @@ type Scheduler struct { activeUsers *util.ActiveUsersCleanupService pendingRequestsMu sync.Mutex - pendingRequests map[requestKey]*schedulerRequest // Request is kept in this map even after being dispatched to querier. It can still be canceled at that time. + + pendingRequests map[requestKey]*schedulerRequest // Request is kept in this map even after being dispatched to querier. 
It can still be canceled at that time. // Subservices manager. subservices *services.Manager @@ -67,12 +73,26 @@ type Scheduler struct { connectedQuerierClients prometheus.GaugeFunc connectedFrontendClients prometheus.GaugeFunc queueDuration prometheus.Histogram + + // Enables or disables distributed query execution functionality + distributedExecEnabled bool + fragmenter plan_fragments.Fragmenter // Splits logical plans into executable fragments + fragmentTable *fragment_table.FragmentTable // Tracks fragment execution state and querier assignments + + // Maps queries to their fragment IDs for efficient query cancellation. + // Using this map avoids the need to scan all pending requests to find + // fragments belonging to a specific query. + queryFragmentRegistry map[queryKey][]uint64 } -type requestKey struct { +type queryKey struct { frontendAddr string queryID uint64 } +type requestKey struct { + queryKey queryKey + fragmentID uint64 +} type connectedFrontend struct { connections int @@ -95,7 +115,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { } // NewScheduler creates a new Scheduler. -func NewScheduler(cfg Config, limits Limits, log log.Logger, registerer prometheus.Registerer) (*Scheduler, error) { +func NewScheduler(cfg Config, limits Limits, log log.Logger, registerer prometheus.Registerer, distributedExecEnabled bool) (*Scheduler, error) { s := &Scheduler{ cfg: cfg, log: log, @@ -103,6 +123,11 @@ func NewScheduler(cfg Config, limits Limits, log log.Logger, registerer promethe pendingRequests: map[requestKey]*schedulerRequest{}, connectedFrontends: map[string]*connectedFrontend{}, + + fragmentTable: fragment_table.NewFragmentTable(2 * time.Minute), + fragmenter: plan_fragments.NewDummyFragmenter(), + distributedExecEnabled: distributedExecEnabled, + queryFragmentRegistry: map[queryKey][]uint64{}, } s.queueLength = promauto.With(registerer).NewGaugeVec(prometheus.GaugeOpts{ @@ -166,6 +191,11 @@ type schedulerRequest struct { // This is only used for testing. parentSpanContext opentracing.SpanContext + + // fragment represents a portion of the query plan. + // In distributed execution mode, contains a specific plan segment. + // In non-distributed mode, only marks the query as root fragment. + fragment plan_fragments.Fragment } func (s schedulerRequest) Priority() int64 { @@ -177,6 +207,18 @@ func (s schedulerRequest) Priority() int64 { return priority } +func getPlanFromHTTPRequest(req *httpgrpc.HTTPRequest) ([]byte, error) { + if req.Body == nil { + return nil, nil + } + values, err := url.ParseQuery(string(req.Body)) + if err != nil { + return nil, err + } + plan := values.Get("plan") + return []byte(plan), nil +} + // FrontendLoop handles connection from frontend. 
func (s *Scheduler) FrontendLoop(frontend schedulerpb.SchedulerForFrontend_FrontendLoopServer) error {
 	frontendAddress, frontendCtx, err := s.frontendConnected(frontend)
@@ -212,7 +254,19 @@ func (s *Scheduler) FrontendLoop(frontend schedulerpb.SchedulerForFrontend_Front
 		switch msg.GetType() {
 		case schedulerpb.ENQUEUE:
-			err = s.enqueueRequest(frontendCtx, frontendAddress, msg)
+
+			// If the request body carries a serialized logical plan, fragment it before enqueueing;
+			// otherwise the request is a single root fragment and can be enqueued directly.
+			byteLP, err := getPlanFromHTTPRequest(msg.HttpRequest)
+			if err != nil {
+				return err
+			}
+			if len(byteLP) != 0 {
+				err = s.fragmentAndEnqueueRequest(frontendCtx, frontendAddress, msg, byteLP)
+			} else {
+				err = s.enqueueRequest(frontendCtx, frontendAddress, msg, plan_fragments.Fragment{FragmentID: 0, IsRoot: true})
+			}
+
 			switch err {
 			case nil:
 				resp = &schedulerpb.SchedulerToFrontend{Status: schedulerpb.OK}
@@ -223,7 +277,7 @@ func (s *Scheduler) FrontendLoop(frontend schedulerpb.SchedulerForFront
 			}
 
 		case schedulerpb.CANCEL:
-			s.cancelRequestAndRemoveFromPending(frontendAddress, msg.QueryID)
+			s.cancelRequestAndRemoveFromPending(frontendAddress, msg.QueryID, 0, true)
 			resp = &schedulerpb.SchedulerToFrontend{Status: schedulerpb.OK}
 
 		default:
@@ -279,7 +333,57 @@ func (s *Scheduler) frontendDisconnected(frontendAddress string) {
 	}
 }
 
-func (s *Scheduler) enqueueRequest(frontendContext context.Context, frontendAddr string, msg *schedulerpb.FrontendToScheduler) error {
+func updatePlanInHTTPRequest(fragment plan_fragments.Fragment) ([]byte, error) {
+	byteLP, err := logicalplan.Marshal(fragment.Node)
+	if err != nil {
+		return nil, err
+	}
+	form := url.Values{}
+	form.Add("plan", string(byteLP))
+	return []byte(form.Encode()), nil
+}
+
+func (s *Scheduler) fragmentAndEnqueueRequest(frontendContext context.Context, frontendAddr string, msg *schedulerpb.FrontendToScheduler, byteLogicalPlan []byte) error {
+	// deserialize the logical plan and split it into fragments
+	lpNode, err := distributed_execution.Unmarshal(byteLogicalPlan)
+	if err != nil {
+		return err
+	}
+
+	fragments, err := s.fragmenter.Fragment(lpNode)
+	if err != nil {
+		return err
+	}
+
+	for _, fragment := range fragments {
+		frag := fragment
+		if err := func() error {
+			// update the HTTP request body with this fragment's logical plan
+			newBody, err := updatePlanInHTTPRequest(frag)
+			if err != nil {
+				return err
+			}
+			msg.HttpRequest = &httpgrpc.HTTPRequest{
+				Method:  msg.HttpRequest.Method,
+				Url:     msg.HttpRequest.Url,
+				Headers: msg.HttpRequest.Headers,
+				Body:    newBody,
+			}
+
+			// if enqueueing any fragment fails, propagate the error back immediately
+			return s.enqueueRequest(frontendContext, frontendAddr, msg, frag)
+		}(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
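Note: the two helpers above fix the body convention for distributed execution. The logical plan travels as a `plan` form field in the POST body, so injecting it (updatePlanInHTTPRequest) and extracting it (getPlanFromHTTPRequest) are just a url.Values round-trip, and a body with no `plan` field naturally falls through to the single-root-fragment path in FrontendLoop. A self-contained sketch of that round-trip, with a placeholder string standing in for the marshalled plan bytes:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Inject a plan into a request body, as updatePlanInHTTPRequest does.
	form := url.Values{}
	form.Add("plan", `<marshalled logical plan bytes>`)
	body := []byte(form.Encode())

	// Extract it again, as getPlanFromHTTPRequest does on the enqueue path.
	values, err := url.ParseQuery(string(body))
	if err != nil {
		panic(err)
	}
	fmt.Println(values.Get("plan")) // prints the original plan payload
}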
+func (s *Scheduler) enqueueRequest(frontendContext context.Context, frontendAddr string, msg *schedulerpb.FrontendToScheduler, fragment plan_fragments.Fragment) error {
 	// Create new context for this request, to support cancellation.
 	ctx, cancel := context.WithCancel(frontendContext)
 	shouldCancel := true
@@ -305,6 +409,7 @@
 		queryID:      msg.QueryID,
 		request:      msg.HttpRequest,
 		statsEnabled: msg.StatsEnabled,
+		fragment:     fragment,
 	}
 
 	now := time.Now()
@@ -327,21 +432,38 @@
 		s.pendingRequestsMu.Lock()
 		defer s.pendingRequestsMu.Unlock()
-		s.pendingRequests[requestKey{frontendAddr: frontendAddr, queryID: msg.QueryID}] = req
+
+		qKey := queryKey{frontendAddr: frontendAddr, queryID: msg.QueryID}
+		s.queryFragmentRegistry[qKey] = append(s.queryFragmentRegistry[qKey], req.fragment.FragmentID)
+		s.pendingRequests[requestKey{queryKey: qKey, fragmentID: req.fragment.FragmentID}] = req
 	})
 }
 
 // This method doesn't do removal from the queue.
-func (s *Scheduler) cancelRequestAndRemoveFromPending(frontendAddr string, queryID uint64) {
+func (s *Scheduler) cancelRequestAndRemoveFromPending(frontendAddr string, queryID uint64, fragmentID uint64, cancelAll bool) {
 	s.pendingRequestsMu.Lock()
 	defer s.pendingRequestsMu.Unlock()
 
-	key := requestKey{frontendAddr: frontendAddr, queryID: queryID}
-	req := s.pendingRequests[key]
-	if req != nil {
-		req.ctxCancel()
+	qKey := queryKey{frontendAddr: frontendAddr, queryID: queryID}
+
+	if cancelAll {
+		// cancel all requests under the queryID
+		for _, fragID := range s.queryFragmentRegistry[qKey] {
+			key := requestKey{queryKey: qKey, fragmentID: fragID}
+			if req := s.pendingRequests[key]; req != nil {
+				req.ctxCancel()
+			}
+			delete(s.pendingRequests, key)
+		}
+		delete(s.queryFragmentRegistry, qKey)
+	} else {
+		// cancel a specific fragment of the query by its queryID and fragmentID
+		key := requestKey{queryKey: qKey, fragmentID: fragmentID}
+		if req := s.pendingRequests[key]; req != nil {
+			req.ctxCancel()
+		}
+		delete(s.pendingRequests, key)
 	}
-	delete(s.pendingRequests, key)
 }
 
 // QuerierLoop is started by querier to receive queries from scheduler.
@@ -392,14 +514,13 @@
 		*/
 		if r.ctx.Err() != nil {
-			// Remove from pending requests.
-			s.cancelRequestAndRemoveFromPending(r.frontendAddress, r.queryID)
+			s.cancelRequestAndRemoveFromPending(r.frontendAddress, r.queryID, r.fragment.FragmentID, false)
 			lastUserIndex = lastUserIndex.ReuseLastUser()
 
 			continue
 		}
 
-		if err := s.forwardRequestToQuerier(querier, r); err != nil {
+		if err := s.forwardRequestToQuerier(querier, r, resp.GetQuerierAddress()); err != nil {
 			return err
 		}
 	}
@@ -414,21 +535,41 @@ func (s *Scheduler) NotifyQuerierShutdown(_ context.Context, req *schedulerpb.No
 	return &schedulerpb.NotifyQuerierShutdownResponse{}, nil
 }
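Note: the cancellation path above is the reason queryFragmentRegistry exists. A frontend CANCEL (cancelAll=true) walks only that query's registered fragment IDs rather than scanning every pending request, while per-fragment cleanup (cancelAll=false) removes a single (query, fragment) entry. A self-contained sketch of that bookkeeping, with simplified stand-ins for queryKey, requestKey, and the cancel funcs:

package main

import "fmt"

type qk struct {
	frontendAddr string
	queryID      uint64
}

type rk struct {
	query      qk
	fragmentID uint64
}

func main() {
	registry := map[qk][]uint64{} // queryFragmentRegistry
	pending := map[rk]func(){}    // pendingRequests, reduced to ctxCancel funcs

	q := qk{frontendAddr: "frontend-1", queryID: 7}
	for _, fragID := range []uint64{1, 2, 3} {
		registry[q] = append(registry[q], fragID)
		pending[rk{query: q, fragmentID: fragID}] = func() { /* ctxCancel */ }
	}

	// cancelAll: visit only this query's fragments, not all pending requests.
	for _, fragID := range registry[q] {
		key := rk{query: q, fragmentID: fragID}
		if cancel := pending[key]; cancel != nil {
			cancel()
		}
		delete(pending, key)
	}
	delete(registry, q)

	fmt.Println(len(pending), len(registry)) // 0 0
}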
-func (s *Scheduler) forwardRequestToQuerier(querier schedulerpb.SchedulerForQuerier_QuerierLoopServer, req *schedulerRequest) error {
+func (s *Scheduler) forwardRequestToQuerier(querier schedulerpb.SchedulerForQuerier_QuerierLoopServer, req *schedulerRequest, querierAddress string) error {
 	// Make sure to cancel request at the end to cleanup resources.
-	defer s.cancelRequestAndRemoveFromPending(req.frontendAddress, req.queryID)
+	defer s.cancelRequestAndRemoveFromPending(req.frontendAddress, req.queryID, req.fragment.FragmentID, false)
 
 	// Handle the stream sending & receiving on a goroutine so we can
 	// monitor the contexts in a select and cancel things appropriately.
 	errCh := make(chan error, 1)
 	go func() {
+		childIDtoAddrs := make(map[uint64]string)
+		if len(req.fragment.ChildIDs) != 0 {
+			for _, childID := range req.fragment.ChildIDs {
+				addr, ok := s.fragmentTable.GetAddrByID(req.queryID, childID)
+				if !ok {
+					errCh <- fmt.Errorf("cannot find the address of child fragment %d for parent fragment %d", childID, req.fragment.FragmentID)
+					return
+				}
+				childIDtoAddrs[childID] = addr
+			}
+		}
+
 		err := querier.Send(&schedulerpb.SchedulerToQuerier{
 			UserID:          req.userID,
 			QueryID:         req.queryID,
 			FrontendAddress: req.frontendAddress,
 			HttpRequest:     req.request,
 			StatsEnabled:    req.statsEnabled,
+			FragmentID:      req.fragment.FragmentID,
+			ChildIDtoAddrs:  childIDtoAddrs,
+			IsRoot:          req.fragment.IsRoot,
 		})
+
+		if s.distributedExecEnabled {
+			s.fragmentTable.AddAddressByID(req.queryID, req.fragment.FragmentID, querierAddress)
+		}
+
 		if err != nil {
 			errCh <- err
 			return
diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go
index 9c1d75ad51..d7516b8c6e 100644
--- a/pkg/scheduler/scheduler_test.go
+++ b/pkg/scheduler/scheduler_test.go
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"net"
 	"net/http"
+	"net/url"
 	"strings"
 	"sync"
 	"testing"
@@ -14,12 +15,16 @@ import (
 	"github.com/opentracing/opentracing-go"
 	"github.com/prometheus/client_golang/prometheus"
 	promtest "github.com/prometheus/client_golang/prometheus/testutil"
+	"github.com/prometheus/prometheus/promql/parser"
 	"github.com/stretchr/testify/require"
+	"github.com/thanos-io/promql-engine/logicalplan"
+	"github.com/thanos-io/promql-engine/query"
 	"github.com/uber/jaeger-client-go/config"
 	"github.com/weaveworks/common/httpgrpc"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials/insecure"
 
+	"github.com/cortexproject/cortex/pkg/distributed_execution"
 	frontendv1 "github.com/cortexproject/cortex/pkg/frontend/v1"
 	"github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb"
 	"github.com/cortexproject/cortex/pkg/scheduler/queue"
@@ -32,10 +37,10 @@ import (
 
 const testMaxOutstandingPerTenant = 5
 
-func setupScheduler(t *testing.T, reg prometheus.Registerer) (*Scheduler, schedulerpb.SchedulerForFrontendClient, schedulerpb.SchedulerForQuerierClient) {
+func setupScheduler(t *testing.T, reg prometheus.Registerer, distributedExecEnabled bool) (*Scheduler, schedulerpb.SchedulerForFrontendClient, schedulerpb.SchedulerForQuerierClient) {
 	cfg := Config{}
 	flagext.DefaultValues(&cfg)
-	s, err := NewScheduler(cfg, frontendv1.MockLimits{Queriers: 2, MockLimits: queue.MockLimits{MaxOutstanding: testMaxOutstandingPerTenant}}, log.NewNopLogger(), reg)
+	s, err := NewScheduler(cfg, frontendv1.MockLimits{Queriers: 2, MockLimits: queue.MockLimits{MaxOutstanding: testMaxOutstandingPerTenant}}, log.NewNopLogger(), reg, distributedExecEnabled)
 	require.NoError(t, err)
 
 	server := grpc.NewServer()
@@ -69,7 +74,7 @@ func setupScheduler(t *testing.T, reg prometheus.Registerer) (*Scheduler, schedu
 }
 
 func TestSchedulerBasicEnqueue(t *testing.T) {
-	scheduler, frontendClient, querierClient := setupScheduler(t, nil)
+	scheduler, frontendClient, querierClient := setupScheduler(t, nil, false)
 
 	frontendLoop := initFrontendLoop(t, frontendClient, "frontend-12345")
 	frontendToScheduler(t, frontendLoop, &schedulerpb.FrontendToScheduler{
@@ -97,7 +102,7 @@ func TestSchedulerBasicEnqueue(t *testing.T) {
 }
 
 func TestSchedulerEnqueueWithCancel(t *testing.T) {
-	scheduler, frontendClient, querierClient := setupScheduler(t, nil)
+	scheduler, frontendClient, querierClient := setupScheduler(t, nil, false)
 
 	frontendLoop := initFrontendLoop(t, frontendClient, "frontend-12345")
 	frontendToScheduler(t,
frontendLoop, &schedulerpb.FrontendToScheduler{ @@ -127,7 +132,7 @@ func initQuerierLoop(t *testing.T, querierClient schedulerpb.SchedulerForQuerier } func TestSchedulerEnqueueByMultipleFrontendsWithCancel(t *testing.T) { - scheduler, frontendClient, querierClient := setupScheduler(t, nil) + scheduler, frontendClient, querierClient := setupScheduler(t, nil, false) frontendLoop1 := initFrontendLoop(t, frontendClient, "frontend-1") frontendLoop2 := initFrontendLoop(t, frontendClient, "frontend-2") @@ -168,7 +173,7 @@ func TestSchedulerEnqueueByMultipleFrontendsWithCancel(t *testing.T) { } func TestSchedulerEnqueueWithFrontendDisconnect(t *testing.T) { - scheduler, frontendClient, querierClient := setupScheduler(t, nil) + scheduler, frontendClient, querierClient := setupScheduler(t, nil, false) frontendLoop := initFrontendLoop(t, frontendClient, "frontend-12345") frontendToScheduler(t, frontendLoop, &schedulerpb.FrontendToScheduler{ @@ -179,7 +184,7 @@ func TestSchedulerEnqueueWithFrontendDisconnect(t *testing.T) { }) // Wait until the frontend has connected to the scheduler. - test.Poll(t, time.Second, float64(1), func() interface{} { + test.Poll(t, time.Second, float64(1), func() any { return promtest.ToFloat64(scheduler.connectedFrontendClients) }) @@ -187,7 +192,7 @@ func TestSchedulerEnqueueWithFrontendDisconnect(t *testing.T) { require.NoError(t, frontendLoop.CloseSend()) // Wait until the frontend has disconnected. - test.Poll(t, time.Second, float64(0), func() interface{} { + test.Poll(t, time.Second, float64(0), func() any { return promtest.ToFloat64(scheduler.connectedFrontendClients) }) @@ -198,7 +203,7 @@ func TestSchedulerEnqueueWithFrontendDisconnect(t *testing.T) { } func TestCancelRequestInProgress(t *testing.T) { - scheduler, frontendClient, querierClient := setupScheduler(t, nil) + scheduler, frontendClient, querierClient := setupScheduler(t, nil, false) frontendLoop := initFrontendLoop(t, frontendClient, "frontend-12345") frontendToScheduler(t, frontendLoop, &schedulerpb.FrontendToScheduler{ @@ -231,7 +236,7 @@ func TestCancelRequestInProgress(t *testing.T) { } func TestTracingContext(t *testing.T) { - scheduler, frontendClient, _ := setupScheduler(t, nil) + scheduler, frontendClient, _ := setupScheduler(t, nil, false) frontendLoop := initFrontendLoop(t, frontendClient, "frontend-12345") @@ -262,7 +267,7 @@ func TestTracingContext(t *testing.T) { } func TestSchedulerShutdown_FrontendLoop(t *testing.T) { - scheduler, frontendClient, _ := setupScheduler(t, nil) + scheduler, frontendClient, _ := setupScheduler(t, nil, false) frontendLoop := initFrontendLoop(t, frontendClient, "frontend-12345") @@ -283,7 +288,7 @@ func TestSchedulerShutdown_FrontendLoop(t *testing.T) { } func TestSchedulerShutdown_QuerierLoop(t *testing.T) { - scheduler, frontendClient, querierClient := setupScheduler(t, nil) + scheduler, frontendClient, querierClient := setupScheduler(t, nil, false) frontendLoop := initFrontendLoop(t, frontendClient, "frontend-12345") frontendToScheduler(t, frontendLoop, &schedulerpb.FrontendToScheduler{ @@ -315,9 +320,9 @@ func TestSchedulerShutdown_QuerierLoop(t *testing.T) { } func TestSchedulerMaxOutstandingRequests(t *testing.T) { - _, frontendClient, _ := setupScheduler(t, nil) + _, frontendClient, _ := setupScheduler(t, nil, false) - for i := 0; i < testMaxOutstandingPerTenant; i++ { + for i := range testMaxOutstandingPerTenant { // coming from different frontends fl := initFrontendLoop(t, frontendClient, fmt.Sprintf("frontend-%d", i)) require.NoError(t, 
fl.Send(&schedulerpb.FrontendToScheduler{ @@ -347,7 +352,7 @@ func TestSchedulerMaxOutstandingRequests(t *testing.T) { } func TestSchedulerForwardsErrorToFrontend(t *testing.T) { - _, frontendClient, querierClient := setupScheduler(t, nil) + _, frontendClient, querierClient := setupScheduler(t, nil, false) fm := &frontendMock{resp: map[uint64]*httpgrpc.HTTPResponse{}} frontendAddress := "" @@ -395,7 +400,7 @@ func TestSchedulerForwardsErrorToFrontend(t *testing.T) { require.NoError(t, querierLoop.CloseSend()) // Verify that frontend was notified about request. - test.Poll(t, 2*time.Second, true, func() interface{} { + test.Poll(t, 2*time.Second, true, func() any { resp := fm.getRequest(100) if resp == nil { return false @@ -409,7 +414,7 @@ func TestSchedulerForwardsErrorToFrontend(t *testing.T) { func TestSchedulerMetrics(t *testing.T) { reg := prometheus.NewPedanticRegistry() - scheduler, frontendClient, _ := setupScheduler(t, reg) + scheduler, frontendClient, _ := setupScheduler(t, reg, false) frontendLoop := initFrontendLoop(t, frontendClient, "frontend-12345") frontendToScheduler(t, frontendLoop, &schedulerpb.FrontendToScheduler{ @@ -448,6 +453,119 @@ func TestSchedulerMetrics(t *testing.T) { `), "cortex_query_scheduler_queue_length", "cortex_request_queue_requests_total")) } +// TestQuerierLoopClient_WithLogicalPlan tests to see if the scheduler enqueues the fragment +// with the expected QueryID, logical plan, and other fragment meta-data +func TestQuerierLoopClient_WithLogicalPlan(t *testing.T) { + reg := prometheus.NewPedanticRegistry() + + scheduler, frontendClient, querierClient := setupScheduler(t, reg, true) + frontendLoop := initFrontendLoop(t, frontendClient, "frontend-12345") + querierLoop, err := querierClient.QuerierLoop(context.Background()) + require.NoError(t, err) + + // CASE 1: request with corrupted logical plan --> expect to fail at un-marshal stage + require.NoError(t, frontendLoop.Send(&schedulerpb.FrontendToScheduler{ + Type: schedulerpb.ENQUEUE, + QueryID: 1, + UserID: "test", + HttpRequest: &httpgrpc.HTTPRequest{Method: "POST", Url: "/hello", Body: []byte("plan=test")}, + })) + msg, err := frontendLoop.Recv() + require.NoError(t, err) + require.True(t, msg.Status == schedulerpb.ERROR) + + // CASE 2: request without logical plan --> expect to not have fragment meta-data + frontendToScheduler(t, frontendLoop, &schedulerpb.FrontendToScheduler{ + Type: schedulerpb.ENQUEUE, + QueryID: 2, + UserID: "test2", + HttpRequest: &httpgrpc.HTTPRequest{Method: "POST", Url: "/hello", Body: []byte{}}, // empty logical plan + }) + require.NoError(t, promtest.GatherAndCompare(reg, strings.NewReader(` + # HELP cortex_query_scheduler_queue_length Number of queries in the queue. + # TYPE cortex_query_scheduler_queue_length gauge + cortex_query_scheduler_queue_length{priority="0",type="fifo",user="test2"} 1 + # HELP cortex_request_queue_requests_total Total number of query requests going to the request queue. 
+	# TYPE cortex_request_queue_requests_total counter
+	cortex_request_queue_requests_total{priority="0",user="test2"} 1
+	`), "cortex_query_scheduler_queue_length", "cortex_request_queue_requests_total"))
+
+	require.NoError(t, querierLoop.Send(&schedulerpb.QuerierToScheduler{QuerierID: "querier-1", QuerierAddress: "localhost:8000"}))
+
+	s2, err := querierLoop.Recv()
+	require.NoError(t, err)
+	require.Equal(t, uint64(2), s2.QueryID)
+	// (the fields below should be empty because the request carries no logical plan)
+	require.Empty(t, s2.FragmentID)
+	require.Empty(t, s2.ChildIDtoAddrs)
+	require.Empty(t, s2.HttpRequest.Body)
+	require.True(t, s2.IsRoot)
+
+	// CASE 3: request with correct logical plan --> expect to have fragment metadata
+	scheduler.cleanupMetricsForInactiveUser("test2")
+
+	lp := createTestLogicalPlan(t, time.Now(), time.Now(), 0, "up")
+	bytesLp, err := logicalplan.Marshal(lp.Root())
+	require.NoError(t, err)
+	form := url.Values{}
+	form.Set("plan", string(bytesLp)) // imitates the real format of the HTTP request body
+	frontendToScheduler(t, frontendLoop, &schedulerpb.FrontendToScheduler{
+		Type:        schedulerpb.ENQUEUE,
+		QueryID:     3,
+		UserID:      "test3",
+		HttpRequest: &httpgrpc.HTTPRequest{Method: "POST", Url: "/hello", Body: []byte(form.Encode())},
+	})
+	require.NoError(t, promtest.GatherAndCompare(reg, strings.NewReader(`
+	# HELP cortex_query_scheduler_queue_length Number of queries in the queue.
+	# TYPE cortex_query_scheduler_queue_length gauge
+	cortex_query_scheduler_queue_length{priority="0",type="fifo",user="test3"} 1
+	# HELP cortex_request_queue_requests_total Total number of query requests going to the request queue.
+	# TYPE cortex_request_queue_requests_total counter
+	cortex_request_queue_requests_total{priority="0",user="test3"} 1
+	`), "cortex_query_scheduler_queue_length", "cortex_request_queue_requests_total"))
+
+	require.NoError(t, querierLoop.Send(&schedulerpb.QuerierToScheduler{QuerierID: "querier-1", QuerierAddress: "localhost:8000"}))
+
+	s3, err := querierLoop.Recv()
+	require.NoError(t, err)
+	require.NotEmpty(t, s3.FragmentID)
+	require.Equal(t, uint64(3), s3.QueryID)
+	require.Empty(t, s3.ChildIDtoAddrs) // there is only one fragment for the logical plan, so no child fragments
+	require.Equal(t, s3.HttpRequest.Body, []byte(form.Encode()))
+	require.True(t, s3.IsRoot)
+}
+
+func createTestLogicalPlan(t *testing.T, startTime time.Time, endTime time.Time, step time.Duration, q string) logicalplan.Plan {
+	qOpts := query.Options{
+		Start:              startTime,
+		End:                startTime,
+		Step:               0,
+		StepsBatch:         10,
+		LookbackDelta:      0,
+		EnablePerStepStats: false,
+	}
+
+	if step != 0 {
+		qOpts.End = endTime
+		qOpts.Step = step
+	}
+
+	expr, err := parser.NewParser(q, parser.WithFunctions(parser.Functions)).ParseExpr()
+	require.NoError(t, err)
+
+	planOpts := logicalplan.PlanOptions{
+		DisableDuplicateLabelCheck: false,
+	}
+
+	logicalPlan, _ := logicalplan.NewFromAST(expr, &qOpts, planOpts)
+	optimizedPlan, _ := logicalPlan.Optimize(logicalplan.DefaultOptimizers)
+	dOptimizer := distributed_execution.DistributedOptimizer{}
+	dOptimizedPlanNode, _ := dOptimizer.Optimize(optimizedPlan.Root(), &qOpts)
+	lp := logicalplan.New(dOptimizedPlanNode, &qOpts, planOpts)
+
+	return lp
+}
+
 func initFrontendLoop(t *testing.T, client schedulerpb.SchedulerForFrontendClient, frontendAddr string) schedulerpb.SchedulerForFrontend_FrontendLoopClient {
 	loop, err := client.FrontendLoop(context.Background())
 	require.NoError(t, err)
@@ -474,7 +592,7 @@ func
frontendToScheduler(t *testing.T, frontendLoop schedulerpb.SchedulerForFron // If this verification succeeds, there will be leaked goroutine left behind. It will be cleaned once grpc server is shut down. func verifyQuerierDoesntReceiveRequest(t *testing.T, querierLoop schedulerpb.SchedulerForQuerier_QuerierLoopClient, timeout time.Duration) { - ch := make(chan interface{}, 1) + ch := make(chan any, 1) go func() { m, e := querierLoop.Recv() @@ -494,7 +612,7 @@ func verifyQuerierDoesntReceiveRequest(t *testing.T, querierLoop schedulerpb.Sch } func verifyNoPendingRequestsLeft(t *testing.T, scheduler *Scheduler) { - test.Poll(t, 1*time.Second, 0, func() interface{} { + test.Poll(t, 1*time.Second, 0, func() any { scheduler.pendingRequestsMu.Lock() defer scheduler.pendingRequestsMu.Unlock() return len(scheduler.pendingRequests) diff --git a/pkg/scheduler/schedulerpb/scheduler.pb.go b/pkg/scheduler/schedulerpb/scheduler.pb.go index d3288f95b3..7ba5f7774b 100644 --- a/pkg/scheduler/schedulerpb/scheduler.pb.go +++ b/pkg/scheduler/schedulerpb/scheduler.pb.go @@ -8,6 +8,7 @@ import ( fmt "fmt" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" httpgrpc "github.com/weaveworks/common/httpgrpc" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" @@ -85,7 +86,8 @@ func (SchedulerToFrontendStatus) EnumDescriptor() ([]byte, []int) { // Querier reports its own clientID when it connects, so that scheduler knows how many *different* queriers are connected. // To signal that querier is ready to accept another request, querier sends empty message. type QuerierToScheduler struct { - QuerierID string `protobuf:"bytes,1,opt,name=querierID,proto3" json:"querierID,omitempty"` + QuerierID string `protobuf:"bytes,1,opt,name=querierID,proto3" json:"querierID,omitempty"` + QuerierAddress string `protobuf:"bytes,2,opt,name=querierAddress,proto3" json:"querierAddress,omitempty"` } func (m *QuerierToScheduler) Reset() { *m = QuerierToScheduler{} } @@ -127,6 +129,13 @@ func (m *QuerierToScheduler) GetQuerierID() string { return "" } +func (m *QuerierToScheduler) GetQuerierAddress() string { + if m != nil { + return m.QuerierAddress + } + return "" +} + type SchedulerToQuerier struct { // Query ID as reported by frontend. When querier sends the response back to frontend (using frontendAddress), // it identifies the query by using this ID. @@ -139,6 +148,13 @@ type SchedulerToQuerier struct { // Whether query statistics tracking should be enabled. The response will include // statistics only when this option is enabled. StatsEnabled bool `protobuf:"varint,5,opt,name=statsEnabled,proto3" json:"statsEnabled,omitempty"` + // Below are the meta data that will be used for distributed execution + // The ID of current logical query plan fragment. 
+ FragmentID uint64 `protobuf:"varint,6,opt,name=fragmentID,proto3" json:"fragmentID,omitempty"` + // The IDs and addresses of its child fragments + ChildIDtoAddrs map[uint64]string `protobuf:"bytes,7,rep,name=childIDtoAddrs,proto3" json:"childIDtoAddrs,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Whether the current fragment is the root + IsRoot bool `protobuf:"varint,8,opt,name=isRoot,proto3" json:"isRoot,omitempty"` } func (m *SchedulerToQuerier) Reset() { *m = SchedulerToQuerier{} } @@ -208,6 +224,27 @@ func (m *SchedulerToQuerier) GetStatsEnabled() bool { return false } +func (m *SchedulerToQuerier) GetFragmentID() uint64 { + if m != nil { + return m.FragmentID + } + return 0 +} + +func (m *SchedulerToQuerier) GetChildIDtoAddrs() map[uint64]string { + if m != nil { + return m.ChildIDtoAddrs + } + return nil +} + +func (m *SchedulerToQuerier) GetIsRoot() bool { + if m != nil { + return m.IsRoot + } + return false +} + type FrontendToScheduler struct { Type FrontendToSchedulerType `protobuf:"varint,1,opt,name=type,proto3,enum=schedulerpb.FrontendToSchedulerType" json:"type,omitempty"` // Used by INIT message. Will be put into all requests passed to querier. @@ -429,6 +466,7 @@ func init() { proto.RegisterEnum("schedulerpb.SchedulerToFrontendStatus", SchedulerToFrontendStatus_name, SchedulerToFrontendStatus_value) proto.RegisterType((*QuerierToScheduler)(nil), "schedulerpb.QuerierToScheduler") proto.RegisterType((*SchedulerToQuerier)(nil), "schedulerpb.SchedulerToQuerier") + proto.RegisterMapType((map[uint64]string)(nil), "schedulerpb.SchedulerToQuerier.ChildIDtoAddrsEntry") proto.RegisterType((*FrontendToScheduler)(nil), "schedulerpb.FrontendToScheduler") proto.RegisterType((*SchedulerToFrontend)(nil), "schedulerpb.SchedulerToFrontend") proto.RegisterType((*NotifyQuerierShutdownRequest)(nil), "schedulerpb.NotifyQuerierShutdownRequest") @@ -438,48 +476,54 @@ func init() { func init() { proto.RegisterFile("scheduler.proto", fileDescriptor_2b3fc28395a6d9c5) } var fileDescriptor_2b3fc28395a6d9c5 = []byte{ - // 644 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0x4f, 0x4f, 0xdb, 0x4e, - 0x10, 0xf5, 0x86, 0x24, 0xc0, 0x84, 0xdf, 0x0f, 0x77, 0x81, 0x36, 0x8d, 0xe8, 0x12, 0x45, 0x55, - 0x95, 0x72, 0x48, 0xaa, 0xb4, 0x52, 0x7b, 0x40, 0x95, 0x52, 0x30, 0x25, 0x2a, 0x75, 0x60, 0xb3, - 0x51, 0xff, 0x5c, 0x22, 0x92, 0x2c, 0x09, 0x02, 0xbc, 0x66, 0x6d, 0x17, 0xe5, 0xd6, 0x63, 0x8f, - 0xfd, 0x18, 0xfd, 0x28, 0xbd, 0x54, 0xe2, 0xc8, 0xa1, 0x87, 0x62, 0x2e, 0x3d, 0xf2, 0x11, 0xaa, - 0x38, 0x76, 0xea, 0xa4, 0x0e, 0x70, 0x9b, 0x1d, 0xbf, 0xe7, 0x9d, 0xf7, 0x66, 0x66, 0x61, 0xde, - 0x6a, 0x75, 0x79, 0xdb, 0x39, 0xe2, 0xb2, 0x60, 0x4a, 0x61, 0x0b, 0x9c, 0x1a, 0x26, 0xcc, 0x66, - 0x66, 0xb1, 0x23, 0x3a, 0xc2, 0xcb, 0x17, 0xfb, 0xd1, 0x00, 0x92, 0x79, 0xd6, 0x39, 0xb0, 0xbb, - 0x4e, 0xb3, 0xd0, 0x12, 0xc7, 0xc5, 0x53, 0xbe, 0xf7, 0x89, 0x9f, 0x0a, 0x79, 0x68, 0x15, 0x5b, - 0xe2, 0xf8, 0x58, 0x18, 0xc5, 0xae, 0x6d, 0x9b, 0x1d, 0x69, 0xb6, 0x86, 0xc1, 0x80, 0x95, 0x2b, - 0x01, 0xde, 0x75, 0xb8, 0x3c, 0xe0, 0x92, 0x89, 0x5a, 0x70, 0x07, 0x5e, 0x86, 0xd9, 0x93, 0x41, - 0xb6, 0xb2, 0x91, 0x46, 0x59, 0x94, 0x9f, 0xa5, 0x7f, 0x13, 0xb9, 0x1f, 0x08, 0xf0, 0x10, 0xcb, - 0x84, 0xcf, 0xc7, 0x69, 0x98, 0xee, 0x63, 0x7a, 0x3e, 0x25, 0x4e, 0x83, 0x23, 0x7e, 0x0e, 0xa9, - 0xfe, 0xb5, 0x94, 0x9f, 0x38, 0xdc, 0xb2, 0xd3, 0xb1, 0x2c, 0xca, 0xa7, 0x4a, 0x4b, 0x85, 0x61, - 0x29, 0x5b, 0x8c, 0xed, 0xf8, 
0x1f, 0x69, 0x18, 0x89, 0xf3, 0x30, 0xbf, 0x2f, 0x85, 0x61, 0x73, - 0xa3, 0x5d, 0x6e, 0xb7, 0x25, 0xb7, 0xac, 0xf4, 0x94, 0x57, 0xcd, 0x78, 0x1a, 0xdf, 0x85, 0xa4, - 0x63, 0x79, 0xe5, 0xc6, 0x3d, 0x80, 0x7f, 0xc2, 0x39, 0x98, 0xb3, 0xec, 0x3d, 0xdb, 0xd2, 0x8c, - 0xbd, 0xe6, 0x11, 0x6f, 0xa7, 0x13, 0x59, 0x94, 0x9f, 0xa1, 0x23, 0xb9, 0xdc, 0x97, 0x18, 0x2c, - 0x6c, 0xfa, 0xff, 0x0b, 0xbb, 0xf0, 0x02, 0xe2, 0x76, 0xcf, 0xe4, 0x9e, 0x9a, 0xff, 0x4b, 0x0f, - 0x0b, 0xa1, 0x1e, 0x14, 0x22, 0xf0, 0xac, 0x67, 0x72, 0xea, 0x31, 0xa2, 0xea, 0x8e, 0x45, 0xd7, - 0x1d, 0x32, 0x6d, 0x6a, 0xd4, 0xb4, 0x49, 0x8a, 0xc6, 0xcc, 0x4c, 0xdc, 0xda, 0xcc, 0x71, 0x2b, - 0x92, 0x11, 0x56, 0x1c, 0xc2, 0x42, 0xa8, 0xb3, 0x81, 0x48, 0xfc, 0x12, 0x92, 0x7d, 0x98, 0x63, - 0xf9, 0x5e, 0x3c, 0x1a, 0xf1, 0x22, 0x82, 0x51, 0xf3, 0xd0, 0xd4, 0x67, 0xe1, 0x45, 0x48, 0x70, - 0x29, 0x85, 0xf4, 0x5d, 0x18, 0x1c, 0x72, 0x6b, 0xb0, 0xac, 0x0b, 0xfb, 0x60, 0xbf, 0xe7, 0x4f, - 0x50, 0xad, 0xeb, 0xd8, 0x6d, 0x71, 0x6a, 0x04, 0x05, 0x5f, 0x3f, 0x85, 0x2b, 0xf0, 0x60, 0x02, - 0xdb, 0x32, 0x85, 0x61, 0xf1, 0xd5, 0x35, 0xb8, 0x37, 0xa1, 0x4b, 0x78, 0x06, 0xe2, 0x15, 0xbd, - 0xc2, 0x54, 0x05, 0xa7, 0x60, 0x5a, 0xd3, 0x77, 0xeb, 0x5a, 0x5d, 0x53, 0x11, 0x06, 0x48, 0xae, - 0x97, 0xf5, 0x75, 0x6d, 0x5b, 0x8d, 0xad, 0xb6, 0xe0, 0xfe, 0x44, 0x5d, 0x38, 0x09, 0xb1, 0xea, - 0x1b, 0x55, 0xc1, 0x59, 0x58, 0x66, 0xd5, 0x6a, 0xe3, 0x6d, 0x59, 0xff, 0xd0, 0xa0, 0xda, 0x6e, - 0x5d, 0xab, 0xb1, 0x5a, 0x63, 0x47, 0xa3, 0x0d, 0xa6, 0xe9, 0x65, 0x9d, 0xa9, 0x08, 0xcf, 0x42, - 0x42, 0xa3, 0xb4, 0x4a, 0xd5, 0x18, 0xbe, 0x03, 0xff, 0xd5, 0xb6, 0xea, 0x8c, 0x55, 0xf4, 0xd7, - 0x8d, 0x8d, 0xea, 0x3b, 0x5d, 0x9d, 0x2a, 0xfd, 0x44, 0x21, 0xbf, 0x37, 0x85, 0x0c, 0x56, 0xa9, - 0x0e, 0x29, 0x3f, 0xdc, 0x16, 0xc2, 0xc4, 0x2b, 0x23, 0x76, 0xff, 0xbb, 0xaf, 0x99, 0x95, 0x49, - 0xfd, 0xf0, 0xb1, 0x39, 0x25, 0x8f, 0x9e, 0x20, 0x6c, 0xc0, 0x52, 0xa4, 0x65, 0xf8, 0xf1, 0x08, - 0xff, 0xba, 0xa6, 0x64, 0x56, 0x6f, 0x03, 0x1d, 0x74, 0xa0, 0x64, 0xc2, 0x62, 0x58, 0xdd, 0x70, - 0x9c, 0xde, 0xc3, 0x5c, 0x10, 0x7b, 0xfa, 0xb2, 0x37, 0xad, 0x56, 0x26, 0x7b, 0xd3, 0xc0, 0x0d, - 0x14, 0xbe, 0x2a, 0x9f, 0x5d, 0x10, 0xe5, 0xfc, 0x82, 0x28, 0x57, 0x17, 0x04, 0x7d, 0x76, 0x09, - 0xfa, 0xe6, 0x12, 0xf4, 0xdd, 0x25, 0xe8, 0xcc, 0x25, 0xe8, 0x97, 0x4b, 0xd0, 0x6f, 0x97, 0x28, - 0x57, 0x2e, 0x41, 0x5f, 0x2f, 0x89, 0x72, 0x76, 0x49, 0x94, 0xf3, 0x4b, 0xa2, 0x7c, 0x0c, 0xbf, - 0xae, 0xcd, 0xa4, 0xf7, 0x30, 0x3e, 0xfd, 0x13, 0x00, 0x00, 0xff, 0xff, 0x88, 0x0c, 0xfe, 0x56, - 0x84, 0x05, 0x00, 0x00, + // 750 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x4d, 0x4f, 0xdb, 0x4c, + 0x10, 0xf6, 0xe6, 0x0b, 0x98, 0xf0, 0x82, 0xdf, 0x05, 0xde, 0x37, 0x8d, 0xa8, 0x89, 0xac, 0x0a, + 0xa5, 0x1c, 0x92, 0x2a, 0x54, 0x2a, 0xaa, 0x50, 0xa5, 0x94, 0x98, 0x12, 0x95, 0x3a, 0xe0, 0x38, + 0x6a, 0x4b, 0x0f, 0x51, 0x3e, 0x96, 0x24, 0x22, 0xf1, 0x1a, 0x7b, 0x0d, 0xca, 0xad, 0xc7, 0x1e, + 0xfb, 0x27, 0x2a, 0xf5, 0xa7, 0xf4, 0xc8, 0x91, 0x43, 0x0f, 0xc5, 0x5c, 0x7a, 0xe4, 0x27, 0x54, + 0x76, 0xec, 0xd4, 0x09, 0x09, 0x70, 0x9b, 0x19, 0x3f, 0x33, 0x3b, 0xf3, 0x3c, 0xb3, 0x5e, 0x58, + 0x34, 0x1b, 0x6d, 0xd2, 0xb4, 0xba, 0xc4, 0xc8, 0xe8, 0x06, 0x65, 0x14, 0xc7, 0x87, 0x01, 0xbd, + 0x9e, 0x5c, 0x6e, 0xd1, 0x16, 0x75, 0xe3, 0x59, 0xc7, 0x1a, 0x40, 0x92, 0xcf, 0x5b, 0x1d, 0xd6, + 0xb6, 0xea, 0x99, 0x06, 0xed, 0x65, 0xcf, 0x49, 0xed, 0x8c, 0x9c, 0x53, 0xe3, 0xc4, 0xcc, 0x36, + 0x68, 0xaf, 0x47, 0xb5, 0x6c, 0x9b, 0x31, 0xbd, 0x65, 0xe8, 0x8d, 0xa1, 0x31, 
0xc8, 0x12, 0x8f, + 0x00, 0x1f, 0x5a, 0xc4, 0xe8, 0x10, 0x43, 0xa5, 0x65, 0xff, 0x0c, 0xbc, 0x0a, 0x73, 0xa7, 0x83, + 0x68, 0xb1, 0x90, 0x40, 0x29, 0x94, 0x9e, 0x53, 0xfe, 0x06, 0xf0, 0x3a, 0x2c, 0x78, 0x4e, 0xbe, + 0xd9, 0x34, 0x88, 0x69, 0x26, 0x42, 0x2e, 0x64, 0x2c, 0x2a, 0x7e, 0x0b, 0x03, 0x1e, 0xd6, 0x54, + 0xa9, 0x77, 0x0e, 0x4e, 0xc0, 0x8c, 0x03, 0xec, 0x7b, 0xa5, 0x23, 0x8a, 0xef, 0xe2, 0x17, 0x10, + 0x77, 0xda, 0x53, 0xc8, 0xa9, 0x45, 0x4c, 0xe6, 0x56, 0x8d, 0xe7, 0x56, 0x32, 0xc3, 0x96, 0xf7, + 0x54, 0xf5, 0xc0, 0xfb, 0xa8, 0x04, 0x91, 0x38, 0x0d, 0x8b, 0xc7, 0x06, 0xd5, 0x18, 0xd1, 0x9a, + 0x7e, 0x4b, 0x61, 0xb7, 0xa5, 0xf1, 0x30, 0xfe, 0x0f, 0x62, 0x96, 0xe9, 0x8e, 0x15, 0x71, 0x01, + 0x9e, 0x87, 0x45, 0x98, 0x37, 0x59, 0x8d, 0x99, 0x92, 0x56, 0xab, 0x77, 0x49, 0x33, 0x11, 0x4d, + 0xa1, 0xf4, 0xac, 0x32, 0x12, 0xc3, 0x02, 0xc0, 0xb1, 0x51, 0x6b, 0xf5, 0x88, 0xc6, 0x8a, 0x85, + 0x44, 0xcc, 0xed, 0x3d, 0x10, 0xc1, 0x9f, 0x60, 0xa1, 0xd1, 0xee, 0x74, 0x9b, 0xc5, 0x02, 0xa3, + 0xce, 0x79, 0x66, 0x62, 0x26, 0x15, 0x4e, 0xc7, 0x73, 0x9b, 0x99, 0x80, 0x7a, 0x99, 0xdb, 0x8c, + 0x64, 0x76, 0x46, 0xb2, 0x24, 0x8d, 0x19, 0x7d, 0x65, 0xac, 0x94, 0xd3, 0x78, 0xc7, 0x54, 0x28, + 0x65, 0x89, 0x59, 0xb7, 0x35, 0xcf, 0x4b, 0xe6, 0x61, 0x69, 0x42, 0x3a, 0xe6, 0x21, 0x7c, 0x42, + 0xfa, 0x1e, 0xc1, 0x8e, 0x89, 0x97, 0x21, 0x7a, 0x56, 0xeb, 0x5a, 0xc4, 0x13, 0x6b, 0xe0, 0xbc, + 0x0c, 0x6d, 0x21, 0xf1, 0x4b, 0x08, 0x96, 0x76, 0x3d, 0x9e, 0x82, 0x5b, 0xb0, 0x05, 0x11, 0xd6, + 0xd7, 0x89, 0x5b, 0x64, 0x21, 0xf7, 0x64, 0x64, 0x8a, 0x09, 0x78, 0xb5, 0xaf, 0x13, 0xc5, 0xcd, + 0x98, 0xa4, 0x47, 0x68, 0xb2, 0x1e, 0x81, 0x65, 0x08, 0x8f, 0x2e, 0xc3, 0x34, 0xa5, 0xc6, 0x96, + 0x24, 0xfa, 0xe0, 0x25, 0x19, 0x97, 0x38, 0x76, 0x5b, 0x62, 0xf1, 0x04, 0x96, 0x02, 0xfa, 0xf8, + 0x43, 0xe2, 0x57, 0x10, 0x73, 0x60, 0x96, 0xe9, 0x71, 0xb1, 0x3e, 0x4d, 0x51, 0x3f, 0xa3, 0xec, + 0xa2, 0x15, 0x2f, 0xcb, 0xe1, 0x9e, 0x18, 0x06, 0x35, 0x7c, 0xee, 0x5d, 0x47, 0xdc, 0x86, 0x55, + 0x99, 0xb2, 0xce, 0x71, 0xdf, 0xdb, 0x83, 0x72, 0xdb, 0x62, 0x4d, 0x7a, 0xae, 0xf9, 0x0d, 0xdf, + 0x79, 0x0b, 0xc5, 0x35, 0x78, 0x3c, 0x25, 0xdb, 0xd4, 0xa9, 0x66, 0x92, 0x8d, 0x6d, 0xf8, 0x7f, + 0x8a, 0x4a, 0x78, 0x16, 0x22, 0x45, 0xb9, 0xa8, 0xf2, 0x1c, 0x8e, 0xc3, 0x8c, 0x24, 0x1f, 0x56, + 0xa4, 0x8a, 0xc4, 0x23, 0x0c, 0x10, 0xdb, 0xc9, 0xcb, 0x3b, 0xd2, 0x3e, 0x1f, 0xda, 0x68, 0xc0, + 0xa3, 0xa9, 0x73, 0xe1, 0x18, 0x84, 0x4a, 0x6f, 0x79, 0x0e, 0xa7, 0x60, 0x55, 0x2d, 0x95, 0xaa, + 0xef, 0xf2, 0xf2, 0xc7, 0xaa, 0x22, 0x1d, 0x56, 0xa4, 0xb2, 0x5a, 0xae, 0x1e, 0x48, 0x4a, 0x55, + 0x95, 0xe4, 0xbc, 0xac, 0xf2, 0x08, 0xcf, 0x41, 0x54, 0x52, 0x94, 0x92, 0xc2, 0x87, 0xf0, 0xbf, + 0xf0, 0x4f, 0x79, 0xaf, 0xa2, 0xaa, 0x45, 0xf9, 0x4d, 0xb5, 0x50, 0x7a, 0x2f, 0xf3, 0xe1, 0xdc, + 0x4f, 0x14, 0xe0, 0x7b, 0x97, 0x1a, 0xfe, 0x2f, 0xa2, 0x02, 0x71, 0xcf, 0xdc, 0xa7, 0x54, 0xc7, + 0x6b, 0x23, 0x74, 0xdf, 0xfe, 0x5f, 0x25, 0xd7, 0xee, 0xb9, 0x61, 0x22, 0x97, 0x46, 0xcf, 0x10, + 0xd6, 0x60, 0x65, 0x22, 0x65, 0xf8, 0xe9, 0x48, 0xfe, 0x5d, 0xa2, 0x24, 0x37, 0x1e, 0x02, 0x1d, + 0x28, 0x90, 0xd3, 0x61, 0x39, 0x38, 0xdd, 0x70, 0x9d, 0x3e, 0xc0, 0xbc, 0x6f, 0xbb, 0xf3, 0xa5, + 0xee, 0xbb, 0x5a, 0xc9, 0xd4, 0x7d, 0x0b, 0x37, 0x98, 0xf0, 0x75, 0xfe, 0xe2, 0x4a, 0xe0, 0x2e, + 0xaf, 0x04, 0xee, 0xe6, 0x4a, 0x40, 0x9f, 0x6d, 0x01, 0x7d, 0xb7, 0x05, 0xf4, 0xc3, 0x16, 0xd0, + 0x85, 0x2d, 0xa0, 0x5f, 0xb6, 0x80, 0x7e, 0xdb, 0x02, 0x77, 0x63, 0x0b, 0xe8, 0xeb, 0xb5, 0xc0, + 0x5d, 0x5c, 0x0b, 0xdc, 0xe5, 0xb5, 0xc0, 0x1d, 0x05, 0x5f, 0x97, 0x7a, 0xcc, 0x7d, 0x18, 0x36, + 0xff, 
0x04, 0x00, 0x00, 0xff, 0xff, 0xee, 0xbe, 0x64, 0x48, 0x84, 0x06, 0x00, 0x00, } func (x FrontendToSchedulerType) String() string { @@ -518,6 +562,9 @@ func (this *QuerierToScheduler) Equal(that interface{}) bool { if this.QuerierID != that1.QuerierID { return false } + if this.QuerierAddress != that1.QuerierAddress { + return false + } return true } func (this *SchedulerToQuerier) Equal(that interface{}) bool { @@ -554,6 +601,20 @@ func (this *SchedulerToQuerier) Equal(that interface{}) bool { if this.StatsEnabled != that1.StatsEnabled { return false } + if this.FragmentID != that1.FragmentID { + return false + } + if len(this.ChildIDtoAddrs) != len(that1.ChildIDtoAddrs) { + return false + } + for i := range this.ChildIDtoAddrs { + if this.ChildIDtoAddrs[i] != that1.ChildIDtoAddrs[i] { + return false + } + } + if this.IsRoot != that1.IsRoot { + return false + } return true } func (this *FrontendToScheduler) Equal(that interface{}) bool { @@ -671,9 +732,10 @@ func (this *QuerierToScheduler) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 5) + s := make([]string, 0, 6) s = append(s, "&schedulerpb.QuerierToScheduler{") s = append(s, "QuerierID: "+fmt.Sprintf("%#v", this.QuerierID)+",\n") + s = append(s, "QuerierAddress: "+fmt.Sprintf("%#v", this.QuerierAddress)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -681,7 +743,7 @@ func (this *SchedulerToQuerier) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 9) + s := make([]string, 0, 12) s = append(s, "&schedulerpb.SchedulerToQuerier{") s = append(s, "QueryID: "+fmt.Sprintf("%#v", this.QueryID)+",\n") if this.HttpRequest != nil { @@ -690,6 +752,21 @@ func (this *SchedulerToQuerier) GoString() string { s = append(s, "FrontendAddress: "+fmt.Sprintf("%#v", this.FrontendAddress)+",\n") s = append(s, "UserID: "+fmt.Sprintf("%#v", this.UserID)+",\n") s = append(s, "StatsEnabled: "+fmt.Sprintf("%#v", this.StatsEnabled)+",\n") + s = append(s, "FragmentID: "+fmt.Sprintf("%#v", this.FragmentID)+",\n") + keysForChildIDtoAddrs := make([]uint64, 0, len(this.ChildIDtoAddrs)) + for k, _ := range this.ChildIDtoAddrs { + keysForChildIDtoAddrs = append(keysForChildIDtoAddrs, k) + } + github_com_gogo_protobuf_sortkeys.Uint64s(keysForChildIDtoAddrs) + mapStringForChildIDtoAddrs := "map[uint64]string{" + for _, k := range keysForChildIDtoAddrs { + mapStringForChildIDtoAddrs += fmt.Sprintf("%#v: %#v,", k, this.ChildIDtoAddrs[k]) + } + mapStringForChildIDtoAddrs += "}" + if this.ChildIDtoAddrs != nil { + s = append(s, "ChildIDtoAddrs: "+mapStringForChildIDtoAddrs+",\n") + } + s = append(s, "IsRoot: "+fmt.Sprintf("%#v", this.IsRoot)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -1048,6 +1125,13 @@ func (m *QuerierToScheduler) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.QuerierAddress) > 0 { + i -= len(m.QuerierAddress) + copy(dAtA[i:], m.QuerierAddress) + i = encodeVarintScheduler(dAtA, i, uint64(len(m.QuerierAddress))) + i-- + dAtA[i] = 0x12 + } if len(m.QuerierID) > 0 { i -= len(m.QuerierID) copy(dAtA[i:], m.QuerierID) @@ -1078,6 +1162,38 @@ func (m *SchedulerToQuerier) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.IsRoot { + i-- + if m.IsRoot { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if len(m.ChildIDtoAddrs) > 0 { + for k := range m.ChildIDtoAddrs { + v := m.ChildIDtoAddrs[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintScheduler(dAtA, i, uint64(len(v))) + i-- + 
dAtA[i] = 0x12 + i = encodeVarintScheduler(dAtA, i, uint64(k)) + i-- + dAtA[i] = 0x8 + i = encodeVarintScheduler(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } + } + if m.FragmentID != 0 { + i = encodeVarintScheduler(dAtA, i, uint64(m.FragmentID)) + i-- + dAtA[i] = 0x30 + } if m.StatsEnabled { i-- if m.StatsEnabled { @@ -1300,6 +1416,10 @@ func (m *QuerierToScheduler) Size() (n int) { if l > 0 { n += 1 + l + sovScheduler(uint64(l)) } + l = len(m.QuerierAddress) + if l > 0 { + n += 1 + l + sovScheduler(uint64(l)) + } return n } @@ -1327,6 +1447,20 @@ func (m *SchedulerToQuerier) Size() (n int) { if m.StatsEnabled { n += 2 } + if m.FragmentID != 0 { + n += 1 + sovScheduler(uint64(m.FragmentID)) + } + if len(m.ChildIDtoAddrs) > 0 { + for k, v := range m.ChildIDtoAddrs { + _ = k + _ = v + mapEntrySize := 1 + sovScheduler(uint64(k)) + 1 + len(v) + sovScheduler(uint64(len(v))) + n += mapEntrySize + 1 + sovScheduler(uint64(mapEntrySize)) + } + } + if m.IsRoot { + n += 2 + } return n } @@ -1410,6 +1544,7 @@ func (this *QuerierToScheduler) String() string { } s := strings.Join([]string{`&QuerierToScheduler{`, `QuerierID:` + fmt.Sprintf("%v", this.QuerierID) + `,`, + `QuerierAddress:` + fmt.Sprintf("%v", this.QuerierAddress) + `,`, `}`, }, "") return s @@ -1418,12 +1553,25 @@ func (this *SchedulerToQuerier) String() string { if this == nil { return "nil" } + keysForChildIDtoAddrs := make([]uint64, 0, len(this.ChildIDtoAddrs)) + for k, _ := range this.ChildIDtoAddrs { + keysForChildIDtoAddrs = append(keysForChildIDtoAddrs, k) + } + github_com_gogo_protobuf_sortkeys.Uint64s(keysForChildIDtoAddrs) + mapStringForChildIDtoAddrs := "map[uint64]string{" + for _, k := range keysForChildIDtoAddrs { + mapStringForChildIDtoAddrs += fmt.Sprintf("%v: %v,", k, this.ChildIDtoAddrs[k]) + } + mapStringForChildIDtoAddrs += "}" s := strings.Join([]string{`&SchedulerToQuerier{`, `QueryID:` + fmt.Sprintf("%v", this.QueryID) + `,`, `HttpRequest:` + strings.Replace(fmt.Sprintf("%v", this.HttpRequest), "HTTPRequest", "httpgrpc.HTTPRequest", 1) + `,`, `FrontendAddress:` + fmt.Sprintf("%v", this.FrontendAddress) + `,`, `UserID:` + fmt.Sprintf("%v", this.UserID) + `,`, `StatsEnabled:` + fmt.Sprintf("%v", this.StatsEnabled) + `,`, + `FragmentID:` + fmt.Sprintf("%v", this.FragmentID) + `,`, + `ChildIDtoAddrs:` + mapStringForChildIDtoAddrs + `,`, + `IsRoot:` + fmt.Sprintf("%v", this.IsRoot) + `,`, `}`, }, "") return s @@ -1542,6 +1690,38 @@ func (m *QuerierToScheduler) Unmarshal(dAtA []byte) error { } m.QuerierID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QuerierAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowScheduler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthScheduler + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthScheduler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.QuerierAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipScheduler(dAtA[iNdEx:]) @@ -1734,6 +1914,158 @@ func (m *SchedulerToQuerier) Unmarshal(dAtA []byte) error { } } m.StatsEnabled = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for 
field FragmentID", wireType) + } + m.FragmentID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowScheduler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FragmentID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChildIDtoAddrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowScheduler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthScheduler + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthScheduler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ChildIDtoAddrs == nil { + m.ChildIDtoAddrs = make(map[uint64]string) + } + var mapkey uint64 + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowScheduler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowScheduler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowScheduler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthScheduler + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthScheduler + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipScheduler(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthScheduler + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ChildIDtoAddrs[mapkey] = mapvalue + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsRoot", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowScheduler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsRoot = bool(v != 0) default: iNdEx = preIndex skippy, err := skipScheduler(dAtA[iNdEx:]) diff --git a/pkg/scheduler/schedulerpb/scheduler.proto b/pkg/scheduler/schedulerpb/scheduler.proto index eea28717b8..c84d635008 100644 --- a/pkg/scheduler/schedulerpb/scheduler.proto +++ b/pkg/scheduler/schedulerpb/scheduler.proto @@ -27,7 +27,10 @@ service SchedulerForQuerier { // Querier reports its own clientID when it connects, so that scheduler knows how many *different* queriers are connected. 
// To signal that querier is ready to accept another request, querier sends empty message. message QuerierToScheduler { + string querierID = 1; + + string querierAddress = 2; } message SchedulerToQuerier { @@ -45,6 +48,16 @@ message SchedulerToQuerier { // Whether query statistics tracking should be enabled. The response will include // statistics only when this option is enabled. bool statsEnabled = 5; + + // Below is the metadata that will be used for distributed execution + // The ID of the current logical query plan fragment. + uint64 fragmentID = 6; + + // The IDs and addresses of its child fragments + map<uint64, string> childIDtoAddrs = 7; + + // Whether the current fragment is the root + bool isRoot = 8; } // Scheduler interface exposed to Frontend. Frontend can enqueue and cancel requests. diff --git a/pkg/storage/bucket/azure/config_test.go b/pkg/storage/bucket/azure/config_test.go index 8ae29ec450..5cdf00fb9a 100644 --- a/pkg/storage/bucket/azure/config_test.go +++ b/pkg/storage/bucket/azure/config_test.go @@ -85,7 +85,6 @@ http: } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { cfg := Config{} diff --git a/pkg/storage/bucket/bucket_util.go b/pkg/storage/bucket/bucket_util.go index 6b48a5ffb2..c068d08695 100644 --- a/pkg/storage/bucket/bucket_util.go +++ b/pkg/storage/bucket/bucket_util.go @@ -22,7 +22,7 @@ func DeletePrefix(ctx context.Context, bkt objstore.Bucket, prefix string, logge } result := atomic.NewInt32(0) - err = concurrency.ForEach(ctx, concurrency.CreateJobsFromStrings(keys), maxConcurrency, func(ctx context.Context, key interface{}) error { + err = concurrency.ForEach(ctx, concurrency.CreateJobsFromStrings(keys), maxConcurrency, func(ctx context.Context, key any) error { name := key.(string) if err := bkt.Delete(ctx, name); err != nil { return err diff --git a/pkg/storage/bucket/bucket_util_test.go b/pkg/storage/bucket/bucket_util_test.go index 3a5c4ab721..f48184b7d4 100644 --- a/pkg/storage/bucket/bucket_util_test.go +++ b/pkg/storage/bucket/bucket_util_test.go @@ -38,7 +38,7 @@ func TestDeletePrefixConcurrent(t *testing.T) { require.NoError(t, mem.Upload(context.Background(), "prefix/sub2/4", strings.NewReader("hello"))) require.NoError(t, mem.Upload(context.Background(), "outside/obj", strings.NewReader("hello"))) n := 10000 - for i := 0; i < n; i++ { + for i := range n { require.NoError(t, mem.Upload(context.Background(), fmt.Sprintf("prefix/sub/%d", i), strings.NewReader(fmt.Sprintf("hello%d", i)))) } diff --git a/pkg/storage/bucket/client.go b/pkg/storage/bucket/client.go index b7dc57f9f2..e13a49593c 100644 --- a/pkg/storage/bucket/client.go +++ b/pkg/storage/bucket/client.go @@ -6,6 +6,7 @@ import ( "flag" "fmt" "net/http" + "slices" "strings" "github.com/go-kit/log" @@ -18,7 +19,6 @@ import ( "github.com/cortexproject/cortex/pkg/storage/bucket/gcs" "github.com/cortexproject/cortex/pkg/storage/bucket/s3" "github.com/cortexproject/cortex/pkg/storage/bucket/swift" - "github.com/cortexproject/cortex/pkg/util" ) const ( @@ -90,7 +90,7 @@ func (cfg *Config) RegisterFlagsWithPrefixAndBackend(prefix string, f *flag.Flag } func (cfg *Config) Validate() error { - if !util.StringsContain(cfg.supportedBackends(), cfg.Backend) { + if !slices.Contains(cfg.supportedBackends(), cfg.Backend) { return ErrUnsupportedStorageBackend } diff --git a/pkg/storage/bucket/client_mock.go b/pkg/storage/bucket/client_mock.go index f323000db2..d641067ae0 100644 --- a/pkg/storage/bucket/client_mock.go +++ b/pkg/storage/bucket/client_mock.go @@ -5,6 +5,7
@@ import ( "context" "errors" "io" + "strings" "sync" "time" @@ -23,6 +24,10 @@ type ClientMock struct { uploaded sync.Map } +func (m *ClientMock) Provider() objstore.ObjProvider { + return objstore.FILESYSTEM +} + func (m *ClientMock) WithExpectedErrs(objstore.IsOpFailureExpectedFunc) objstore.Bucket { return m } @@ -32,16 +37,21 @@ func (m *ClientMock) ReaderWithExpectedErrs(objstore.IsOpFailureExpectedFunc) ob } // Upload mocks objstore.Bucket.Upload() -func (m *ClientMock) Upload(ctx context.Context, name string, r io.Reader) error { +func (m *ClientMock) Upload(ctx context.Context, name string, r io.Reader, opts ...objstore.ObjectUploadOption) error { if _, ok := m.uploaded.Load(name); ok { m.uploaded.Store(name, true) } - args := m.Called(ctx, name, r) - return args.Error(0) + if len(opts) > 0 { + args := m.Called(ctx, name, r, opts) + return args.Error(0) + } else { + args := m.Called(ctx, name, r) + return args.Error(0) + } } func (m *ClientMock) MockUpload(name string, err error) { - m.On("Upload", mock.Anything, name, mock.Anything).Return(err) + m.On("Upload", mock.Anything, name, mock.Anything, mock.Anything).Return(err) } // Delete mocks objstore.Bucket.Delete() @@ -73,6 +83,42 @@ func (m *ClientMock) Iter(ctx context.Context, dir string, f func(string) error, return args.Error(0) } +func (m *ClientMock) MockIterWithAttributes(prefix string, objects []string, err error, cb func()) { + m.On("IterWithAttributes", mock.Anything, prefix, mock.Anything, mock.Anything).Return(err).Run(func(args mock.Arguments) { + f := args.Get(2).(func(attrs objstore.IterObjectAttributes) error) + opts := args.Get(3).([]objstore.IterOption) + + // Determine if recursive flag is passed + params := objstore.ApplyIterOptions(opts...) + recursive := params.Recursive + + for _, o := range objects { + // Check if object is under current prefix + if !strings.HasPrefix(o, prefix) { + continue + } + + // Extract the remaining path after prefix + suffix := strings.TrimPrefix(o, prefix) + + // If not recursive and there's a slash in the remaining path, skip it + if !recursive && strings.Contains(suffix, "/") { + continue + } + + attrs := objstore.IterObjectAttributes{ + Name: o, + } + if cb != nil { + cb() + } + if err := f(attrs); err != nil { + break + } + } + }) +} + // MockIter is a convenient method to mock Iter() func (m *ClientMock) MockIter(prefix string, objects []string, err error) { m.MockIterWithCallback(prefix, objects, err, nil) @@ -81,6 +127,7 @@ func (m *ClientMock) MockIter(prefix string, objects []string, err error) { // MockIterWithCallback is a convenient method to mock Iter() and get a callback called when the Iter // API is called. func (m *ClientMock) MockIterWithCallback(prefix string, objects []string, err error, cb func()) { + m.MockIterWithAttributes(prefix, objects, err, cb) m.On("Iter", mock.Anything, prefix, mock.Anything, mock.Anything).Return(err).Run(func(args mock.Arguments) { if cb != nil { cb() diff --git a/pkg/storage/bucket/client_test.go b/pkg/storage/bucket/client_test.go index 78b2ea3db2..f58312b44f 100644 --- a/pkg/storage/bucket/client_test.go +++ b/pkg/storage/bucket/client_test.go @@ -76,7 +76,6 @@ func TestNewClient(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { // Load config @@ -145,7 +144,7 @@ func TestClientMock_MockGet(t *testing.T) { // Run many goroutines all requesting the same mocked object and // ensure there's no race. 
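// A minimal usage sketch for the MockIterWithAttributes helper added above (illustrative only: the object names are made up, and it assumes ClientMock's IterWithAttributes method forwards its arguments to the registered expectation). With no recursive iter option, objects nested below the prefix are filtered out:
//
//	m := &ClientMock{}
//	m.MockIterWithAttributes("dir/", []string{"dir/a", "dir/sub/b"}, nil, nil)
//	var seen []string
//	_ = m.IterWithAttributes(context.Background(), "dir/", func(attrs objstore.IterObjectAttributes) error {
//		seen = append(seen, attrs.Name)
//		return nil
//	})
//	// seen == []string{"dir/a"}; "dir/sub/b" still contains "/" after the prefix, so it is skipped.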
wg := sync.WaitGroup{} - for i := 0; i < 1000; i++ { + for range 1000 { wg.Add(1) go func() { defer wg.Done() diff --git a/pkg/storage/bucket/http/config_test.go b/pkg/storage/bucket/http/config_test.go index 2203a52acb..3594dcd752 100644 --- a/pkg/storage/bucket/http/config_test.go +++ b/pkg/storage/bucket/http/config_test.go @@ -66,7 +66,6 @@ max_connections_per_host: 8 } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { cfg := Config{} diff --git a/pkg/storage/bucket/prefixed_bucket_client.go b/pkg/storage/bucket/prefixed_bucket_client.go index ac3ca06ce3..1f979df312 100644 --- a/pkg/storage/bucket/prefixed_bucket_client.go +++ b/pkg/storage/bucket/prefixed_bucket_client.go @@ -31,8 +31,8 @@ func (b *PrefixedBucketClient) Close() error { } // Upload the contents of the reader as an object into the bucket. -func (b *PrefixedBucketClient) Upload(ctx context.Context, name string, r io.Reader) (err error) { - err = b.bucket.Upload(ctx, b.fullName(name), r) +func (b *PrefixedBucketClient) Upload(ctx context.Context, name string, r io.Reader, opts ...objstore.ObjectUploadOption) (err error) { + err = b.bucket.Upload(ctx, b.fullName(name), r, opts...) return } @@ -44,9 +44,14 @@ func (b *PrefixedBucketClient) Delete(ctx context.Context, name string) error { // Name returns the bucket name for the provider. func (b *PrefixedBucketClient) Name() string { return b.bucket.Name() } -// TODO(Sungjin1212): Implement if needed +// IterWithAttributes calls f for each entry in the given directory (not recursive). The argument to f is the object attributes +// including the prefix of the inspected directory. The configured prefix will be stripped +// before the supplied function is applied. func (b *PrefixedBucketClient) IterWithAttributes(ctx context.Context, dir string, f func(attrs objstore.IterObjectAttributes) error, options ...objstore.IterOption) error { - return b.bucket.IterWithAttributes(ctx, dir, f, options...) + return b.bucket.IterWithAttributes(ctx, b.fullName(dir), func(attrs objstore.IterObjectAttributes) error { + attrs.Name = strings.TrimPrefix(attrs.Name, b.prefix+objstore.DirDelim) + return f(attrs) + }, options...)
} func (b *PrefixedBucketClient) SupportedIterOptions() []objstore.IterOptionType { @@ -109,3 +114,7 @@ func (b *PrefixedBucketClient) WithExpectedErrs(fn objstore.IsOpFailureExpectedF } return b } + +func (b *PrefixedBucketClient) Provider() objstore.ObjProvider { + return b.bucket.Provider() +} diff --git a/pkg/storage/bucket/s3/bucket_client.go b/pkg/storage/bucket/s3/bucket_client.go index 220afb9025..8d3ed4a636 100644 --- a/pkg/storage/bucket/s3/bucket_client.go +++ b/pkg/storage/bucket/s3/bucket_client.go @@ -119,6 +119,10 @@ type BucketWithRetries struct { retryMaxBackoff time.Duration } +func (b *BucketWithRetries) Provider() objstore.ObjProvider { + return b.bucket.Provider() +} + func (b *BucketWithRetries) retry(ctx context.Context, f func() error, operationInfo string) error { var lastErr error retries := backoff.New(ctx, backoff.Config{ @@ -191,12 +195,12 @@ func (b *BucketWithRetries) Exists(ctx context.Context, name string) (exists boo return } -func (b *BucketWithRetries) Upload(ctx context.Context, name string, r io.Reader) error { +func (b *BucketWithRetries) Upload(ctx context.Context, name string, r io.Reader, uploadOpts ...objstore.ObjectUploadOption) error { rs, ok := r.(io.ReadSeeker) if !ok { // Skip retry if incoming Reader is not seekable to avoid // loading entire content into memory - err := b.bucket.Upload(ctx, name, r) + err := b.bucket.Upload(ctx, name, r, uploadOpts...) if err != nil { level.Warn(b.logger).Log("msg", "skip upload retry as reader is not seekable", "file", name, "err", err) } @@ -206,7 +210,7 @@ func (b *BucketWithRetries) Upload(ctx context.Context, name string, r io.Reader if _, err := rs.Seek(0, io.SeekStart); err != nil { return err } - return b.bucket.Upload(ctx, name, rs) + return b.bucket.Upload(ctx, name, rs, uploadOpts...) 
}, fmt.Sprintf("Upload %s", name)) } diff --git a/pkg/storage/bucket/s3/bucket_client_test.go b/pkg/storage/bucket/s3/bucket_client_test.go index ec757100a0..50653d3266 100644 --- a/pkg/storage/bucket/s3/bucket_client_test.go +++ b/pkg/storage/bucket/s3/bucket_client_test.go @@ -184,8 +184,12 @@ type mockBucket struct { calledCount int } +func (m *mockBucket) Provider() objstore.ObjProvider { + return objstore.FILESYSTEM +} + // Upload mocks objstore.Bucket.Upload() -func (m *mockBucket) Upload(ctx context.Context, name string, r io.Reader) error { +func (m *mockBucket) Upload(ctx context.Context, name string, r io.Reader, opts ...objstore.ObjectUploadOption) error { var buf bytes.Buffer if _, err := buf.ReadFrom(r); err != nil { return err diff --git a/pkg/storage/bucket/s3/config.go b/pkg/storage/bucket/s3/config.go index df5bd33ab2..f5778f3435 100644 --- a/pkg/storage/bucket/s3/config.go +++ b/pkg/storage/bucket/s3/config.go @@ -5,6 +5,7 @@ import ( "flag" "fmt" "net/http" + "slices" "strings" "github.com/minio/minio-go/v7/pkg/encrypt" @@ -12,7 +13,6 @@ import ( "github.com/thanos-io/objstore/providers/s3" bucket_http "github.com/cortexproject/cortex/pkg/storage/bucket/http" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" ) @@ -103,14 +103,14 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { // Validate config and returns error on failure func (cfg *Config) Validate() error { - if !util.StringsContain(supportedSignatureVersions, cfg.SignatureVersion) { + if !slices.Contains(supportedSignatureVersions, cfg.SignatureVersion) { return errUnsupportedSignatureVersion } - if !util.StringsContain(supportedBucketLookupTypes, cfg.BucketLookupType) { + if !slices.Contains(supportedBucketLookupTypes, cfg.BucketLookupType) { return errInvalidBucketLookupType } if cfg.ListObjectsVersion != "" { - if !util.StringsContain(supportedListObjectsVersion, cfg.ListObjectsVersion) { + if !slices.Contains(supportedListObjectsVersion, cfg.ListObjectsVersion) { return errInvalidListObjectsVersion } } @@ -155,7 +155,7 @@ func (cfg *SSEConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { } func (cfg *SSEConfig) Validate() error { - if cfg.Type != "" && !util.StringsContain(supportedSSETypes, cfg.Type) { + if cfg.Type != "" && !slices.Contains(supportedSSETypes, cfg.Type) { return errUnsupportedSSEType } diff --git a/pkg/storage/bucket/s3/config_test.go b/pkg/storage/bucket/s3/config_test.go index a01a8a07b7..122f1eeac8 100644 --- a/pkg/storage/bucket/s3/config_test.go +++ b/pkg/storage/bucket/s3/config_test.go @@ -110,7 +110,6 @@ http: } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { cfg := Config{} diff --git a/pkg/storage/bucket/sse_bucket_client.go b/pkg/storage/bucket/sse_bucket_client.go index 873b74e74a..1f645ab657 100644 --- a/pkg/storage/bucket/sse_bucket_client.go +++ b/pkg/storage/bucket/sse_bucket_client.go @@ -51,7 +51,7 @@ func (b *SSEBucketClient) Close() error { } // Upload the contents of the reader as an object into the bucket. 
-func (b *SSEBucketClient) Upload(ctx context.Context, name string, r io.Reader) error { +func (b *SSEBucketClient) Upload(ctx context.Context, name string, r io.Reader, opts ...objstore.ObjectUploadOption) error { if sse, err := b.getCustomS3SSEConfig(); err != nil { return err } else if sse != nil { @@ -60,7 +60,11 @@ func (b *SSEBucketClient) Upload(ctx context.Context, name string, r io.Reader) ctx = s3.ContextWithSSEConfig(ctx, sse) } - return b.bucket.Upload(ctx, name, r) + return b.bucket.Upload(ctx, name, r, opts...) +} + +func (b *SSEBucketClient) Provider() objstore.ObjProvider { + return b.bucket.Provider() } // Delete implements objstore.Bucket. diff --git a/pkg/storage/tsdb/bucketindex/block_ids_fetcher.go b/pkg/storage/tsdb/bucketindex/block_ids_fetcher.go index 51a333c60c..f942b7009a 100644 --- a/pkg/storage/tsdb/bucketindex/block_ids_fetcher.go +++ b/pkg/storage/tsdb/bucketindex/block_ids_fetcher.go @@ -33,20 +33,20 @@ func NewBlockLister(logger log.Logger, bkt objstore.Bucket, userID string, cfgPr } } -func (f *BlockLister) GetActiveAndPartialBlockIDs(ctx context.Context, ch chan<- ulid.ULID) (partialBlocks map[ulid.ULID]bool, err error) { +func (f *BlockLister) GetActiveAndPartialBlockIDs(ctx context.Context, activeBlocks chan<- block.ActiveBlockFetchData) (partialBlocks map[ulid.ULID]bool, err error) { // Fetch the bucket index. idx, err := ReadIndex(ctx, f.bkt, f.userID, f.cfgProvider, f.logger) if errors.Is(err, ErrIndexNotFound) { // This is a legit case happening when the first blocks of a tenant have recently been uploaded by ingesters // and their bucket index has not been created yet. // Fallback to BaseBlockIDsFetcher. - return f.baseLister.GetActiveAndPartialBlockIDs(ctx, ch) + return f.baseLister.GetActiveAndPartialBlockIDs(ctx, activeBlocks) } if errors.Is(err, ErrIndexCorrupted) { // In case a single tenant bucket index is corrupted, we want to return empty active blocks and partial blocks, so skipping this compaction cycle level.Error(f.logger).Log("msg", "corrupted bucket index found", "user", f.userID, "err", err) // Fallback to BaseBlockIDsFetcher.
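// Note on the chan<- block.ActiveBlockFetchData change above: the lister now sends block.ActiveBlockFetchData values instead of bare ulid.ULID, apparently to match the updated Thanos block fetcher API; the ULID travels as a field (see the activeBlocks <- block.ActiveBlockFetchData{ULID: b.ID} send later in this file). A consumer therefore reads it as:
//
//	for d := range activeBlocks {
//		ids = append(ids, d.ULID)
//	}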
- return f.baseLister.GetActiveAndPartialBlockIDs(ctx, ch) + return f.baseLister.GetActiveAndPartialBlockIDs(ctx, activeBlocks) } if errors.Is(err, bucket.ErrCustomerManagedKeyAccessDenied) { @@ -73,7 +73,7 @@ func (f *BlockLister) GetActiveAndPartialBlockIDs(ctx context.Context, ch chan<- select { case <-ctx.Done(): return nil, ctx.Err() - case ch <- b.ID: + case activeBlocks <- block.ActiveBlockFetchData{ULID: b.ID}: } } return nil, nil diff --git a/pkg/storage/tsdb/bucketindex/block_ids_fetcher_test.go b/pkg/storage/tsdb/bucketindex/block_ids_fetcher_test.go index c3673d287e..04c807f6d9 100644 --- a/pkg/storage/tsdb/bucketindex/block_ids_fetcher_test.go +++ b/pkg/storage/tsdb/bucketindex/block_ids_fetcher_test.go @@ -13,6 +13,7 @@ import ( "github.com/go-kit/log" "github.com/oklog/ulid/v2" "github.com/stretchr/testify/require" + "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" cortex_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" @@ -44,14 +45,14 @@ func TestBlockIDsFetcher_Fetch(t *testing.T) { })) blockIdsFetcher := NewBlockLister(logger, bkt, userID, nil) - ch := make(chan ulid.ULID) + ch := make(chan block.ActiveBlockFetchData) var wg sync.WaitGroup var blockIds []ulid.ULID wg.Add(1) go func() { defer wg.Done() for id := range ch { - blockIds = append(blockIds, id) + blockIds = append(blockIds, id.ULID) } }() _, err := blockIdsFetcher.GetActiveAndPartialBlockIDs(ctx, ch) @@ -96,14 +97,14 @@ func TestBlockIDsFetcherFetcher_Fetch_NoBucketIndex(t *testing.T) { require.NoError(t, bkt.Upload(ctx, path.Join(userID, mark.ID.String(), metadata.DeletionMarkFilename), &buf)) } blockIdsFetcher := NewBlockLister(logger, bkt, userID, nil) - ch := make(chan ulid.ULID) + ch := make(chan block.ActiveBlockFetchData) var wg sync.WaitGroup var blockIds []ulid.ULID wg.Add(1) go func() { defer wg.Done() for id := range ch { - blockIds = append(blockIds, id) + blockIds = append(blockIds, id.ULID) } }() _, err := blockIdsFetcher.GetActiveAndPartialBlockIDs(ctx, ch) diff --git a/pkg/storage/tsdb/bucketindex/loader_test.go b/pkg/storage/tsdb/bucketindex/loader_test.go index 088e97818a..482fc43a6c 100644 --- a/pkg/storage/tsdb/bucketindex/loader_test.go +++ b/pkg/storage/tsdb/bucketindex/loader_test.go @@ -64,7 +64,7 @@ func TestLoader_GetIndex_ShouldLazyLoadBucketIndex(t *testing.T) { )) // Request the index multiple times. - for i := 0; i < 10; i++ { + for range 10 { actualIdx, _, err := loader.GetIndex(ctx, "user-1") require.NoError(t, err) assert.Equal(t, idx, actualIdx) @@ -104,7 +104,7 @@ func TestLoader_GetIndex_ShouldCacheError(t *testing.T) { require.NoError(t, bkt.Upload(ctx, path.Join("user-1", IndexCompressedFilename), strings.NewReader("invalid!}"))) // Request the index multiple times. - for i := 0; i < 10; i++ { + for range 10 { _, _, err := loader.GetIndex(ctx, "user-1") require.Equal(t, ErrIndexCorrupted, err) } @@ -140,7 +140,7 @@ func TestLoader_GetIndex_ShouldCacheIndexNotFoundError(t *testing.T) { }) // Request the index multiple times. - for i := 0; i < 10; i++ { + for range 10 { _, _, err := loader.GetIndex(ctx, "user-1") require.Equal(t, ErrIndexNotFound, err) } @@ -242,7 +242,7 @@ func TestLoader_ShouldUpdateIndexInBackgroundOnPreviousLoadSuccess(t *testing.T) require.NoError(t, WriteIndex(ctx, bkt, "user-1", nil, idx)) // Wait until the index has been updated in background. 
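// `any` is a predeclared alias for `interface{}` (since Go 1.18), so the func() any callbacks below are type-identical to the old func() interface{} versions; only the spelling changes.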
- test.Poll(t, 3*time.Second, 2, func() interface{} { + test.Poll(t, 3*time.Second, 2, func() any { actualIdx, _, err := loader.GetIndex(ctx, "user-1") if err != nil { return 0 @@ -305,7 +305,7 @@ func TestLoader_ShouldUpdateIndexInBackgroundOnPreviousLoadFailure(t *testing.T) require.NoError(t, WriteIndex(ctx, bkt, "user-1", nil, idx)) // Wait until the index has been updated in background. - test.Poll(t, 3*time.Second, nil, func() interface{} { + test.Poll(t, 3*time.Second, nil, func() any { _, _, err := loader.GetIndex(ctx, "user-1") return err }) @@ -358,7 +358,7 @@ func TestLoader_ShouldUpdateIndexInBackgroundOnPreviousIndexNotFound(t *testing. require.NoError(t, WriteIndex(ctx, bkt, "user-1", nil, idx)) // Wait until the index has been updated in background. - test.Poll(t, 3*time.Second, nil, func() interface{} { + test.Poll(t, 3*time.Second, nil, func() any { _, _, err := loader.GetIndex(ctx, "user-1") return err }) @@ -415,7 +415,7 @@ func TestLoader_ShouldNotCacheCriticalErrorOnBackgroundUpdates(t *testing.T) { require.NoError(t, bkt.Upload(ctx, path.Join("user-1", IndexCompressedFilename), strings.NewReader("invalid!}"))) // Wait until the first failure has been tracked. - test.Poll(t, 3*time.Second, true, func() interface{} { + test.Poll(t, 3*time.Second, true, func() any { return testutil.ToFloat64(loader.loadFailures) > 0 }) @@ -472,7 +472,7 @@ func TestLoader_ShouldCacheIndexNotFoundOnBackgroundUpdates(t *testing.T) { // Wait until the next index load attempt occurs. prevLoads := testutil.ToFloat64(loader.loadAttempts) - test.Poll(t, 3*time.Second, true, func() interface{} { + test.Poll(t, 3*time.Second, true, func() any { return testutil.ToFloat64(loader.loadAttempts) > prevLoads }) @@ -531,7 +531,7 @@ func TestLoader_ShouldOffloadIndexIfNotFoundDuringBackgroundUpdates(t *testing.T require.NoError(t, DeleteIndex(ctx, bkt, "user-1", nil)) // Wait until the index is offloaded. - test.Poll(t, 3*time.Second, float64(0), func() interface{} { + test.Poll(t, 3*time.Second, float64(0), func() any { return testutil.ToFloat64(loader.loaded) }) @@ -583,7 +583,7 @@ func TestLoader_ShouldOffloadIndexIfIdleTimeoutIsReachedDuringBackgroundUpdates( assert.Equal(t, idx, actualIdx) // Wait until the index is offloaded. - test.Poll(t, 3*time.Second, float64(0), func() interface{} { + test.Poll(t, 3*time.Second, float64(0), func() any { return testutil.ToFloat64(loader.loaded) }) @@ -679,7 +679,7 @@ func TestLoader_ShouldUpdateIndexInBackgroundOnPreviousKeyAccessDenied(t *testin require.NoError(t, WriteIndex(ctx, bkt, "user-1", nil, idx)) // Wait until the index has been updated in background. - test.Poll(t, 3*time.Second, nil, func() interface{} { + test.Poll(t, 3*time.Second, nil, func() any { _, _, err := loader.GetIndex(ctx, "user-1") // Check cached require.NoError(t, loader.checkCachedIndexes(ctx)) @@ -724,7 +724,7 @@ func TestLoader_GetIndex_ShouldCacheKeyDeniedErrors(t *testing.T) { }) // Request the index multiple times. 
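// The loop rewrites below rely on Go 1.22 range-over-int: `for range 10` runs the body exactly 10 times, and `for i := range n` binds i to 0..n-1. Both are drop-in replacements for the classic three-clause loop when the body does not modify the loop variable:
//
//	for i := 0; i < 10; i++ { ... } // before
//	for range 10 { ... }            // after, identical iteration count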
- for i := 0; i < 10; i++ { + for range 10 { _, _, err := loader.GetIndex(ctx, "user-1") require.True(t, errors.Is(err, bucket.ErrCustomerManagedKeyAccessDenied)) } diff --git a/pkg/storage/tsdb/bucketindex/markers_bucket_client.go b/pkg/storage/tsdb/bucketindex/markers_bucket_client.go index e2271cc393..1773db2a68 100644 --- a/pkg/storage/tsdb/bucketindex/markers_bucket_client.go +++ b/pkg/storage/tsdb/bucketindex/markers_bucket_client.go @@ -24,11 +24,15 @@ func BucketWithGlobalMarkers(b objstore.InstrumentedBucket) objstore.Instrumente } } +func (b *globalMarkersBucket) Provider() objstore.ObjProvider { + return b.parent.Provider() +} + // Upload implements objstore.Bucket. -func (b *globalMarkersBucket) Upload(ctx context.Context, name string, r io.Reader) error { +func (b *globalMarkersBucket) Upload(ctx context.Context, name string, r io.Reader, opts ...objstore.ObjectUploadOption) error { globalMarkPath, ok := b.isMark(name) if !ok { - return b.parent.Upload(ctx, name, r) + return b.parent.Upload(ctx, name, r, opts...) } // Read the marker. @@ -38,12 +42,12 @@ func (b *globalMarkersBucket) Upload(ctx context.Context, name string, r io.Read } // Upload it to the global marker's location. - if err := b.parent.Upload(ctx, globalMarkPath, bytes.NewReader(body)); err != nil { + if err := b.parent.Upload(ctx, globalMarkPath, bytes.NewReader(body), opts...); err != nil { return err } // Upload it to the original location too. - return b.parent.Upload(ctx, name, bytes.NewReader(body)) + return b.parent.Upload(ctx, name, bytes.NewReader(body), opts...) } // Delete implements objstore.Bucket. diff --git a/pkg/storage/tsdb/bucketindex/storage_test.go b/pkg/storage/tsdb/bucketindex/storage_test.go index e10d910e08..55f31672e8 100644 --- a/pkg/storage/tsdb/bucketindex/storage_test.go +++ b/pkg/storage/tsdb/bucketindex/storage_test.go @@ -115,7 +115,7 @@ func BenchmarkReadIndex(b *testing.B) { // Mock some blocks and deletion marks in the storage. bkt = BucketWithGlobalMarkers(bkt) - for i := 0; i < numBlocks; i++ { + for i := range numBlocks { minT := int64(i * 10) maxT := int64((i + 1) * 10) @@ -138,9 +138,7 @@ func BenchmarkReadIndex(b *testing.B) { require.Len(b, idx.Blocks, numBlocks) require.Len(b, idx.BlockDeletionMarks, numBlockDeletionMarks) - b.ResetTimer() - - for n := 0; n < b.N; n++ { + for b.Loop() { _, err := ReadIndex(ctx, bkt, userID, nil, logger) require.NoError(b, err) } diff --git a/pkg/storage/tsdb/cached_chunks_querier.go b/pkg/storage/tsdb/cached_chunks_querier.go index e5b230e64b..ab3b11c4fd 100644 --- a/pkg/storage/tsdb/cached_chunks_querier.go +++ b/pkg/storage/tsdb/cached_chunks_querier.go @@ -61,7 +61,7 @@ func newBlockBaseQuerier(b prom_tsdb.BlockReader, mint, maxt int64) (*blockBaseQ } func (q *blockBaseQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { - res, err := q.index.SortedLabelValues(ctx, name, matchers...) + res, err := q.index.SortedLabelValues(ctx, name, hints, matchers...) 
return res, nil, err } diff --git a/pkg/storage/tsdb/caching_bucket.go b/pkg/storage/tsdb/caching_bucket.go index 2efa11cff8..404438033a 100644 --- a/pkg/storage/tsdb/caching_bucket.go +++ b/pkg/storage/tsdb/caching_bucket.go @@ -5,6 +5,7 @@ import ( "fmt" "path/filepath" "regexp" + "slices" "strings" "time" @@ -21,8 +22,6 @@ import ( "github.com/thanos-io/thanos/pkg/cacheutil" "github.com/thanos-io/thanos/pkg/model" storecache "github.com/thanos-io/thanos/pkg/store/cache" - - "github.com/cortexproject/cortex/pkg/util" ) var ( @@ -62,7 +61,7 @@ func (cfg *BucketCacheBackend) Validate() error { } for _, backend := range splitBackends { - if !util.StringsContain(supportedBucketCacheBackends, backend) { + if !slices.Contains(supportedBucketCacheBackends, backend) { return errUnsupportedBucketCacheBackend } @@ -132,10 +131,7 @@ func (cfg *InMemoryBucketCacheConfig) toInMemoryCacheConfig() cache.InMemoryCach maxCacheSize := model.Bytes(cfg.MaxSizeBytes) // Calculate the max item size. - maxItemSize := defaultMaxItemSize - if maxItemSize > maxCacheSize { - maxItemSize = maxCacheSize - } + maxItemSize := min(defaultMaxItemSize, maxCacheSize) return cache.InMemoryCacheConfig{ MaxSize: maxCacheSize, diff --git a/pkg/storage/tsdb/config.go b/pkg/storage/tsdb/config.go index 7b92fe0d88..b51ad077bd 100644 --- a/pkg/storage/tsdb/config.go +++ b/pkg/storage/tsdb/config.go @@ -4,6 +4,7 @@ import ( "flag" "fmt" "path/filepath" + "slices" "strings" "time" @@ -15,7 +16,6 @@ import ( "github.com/thanos-io/thanos/pkg/store" "github.com/cortexproject/cortex/pkg/storage/bucket" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" util_log "github.com/cortexproject/cortex/pkg/util/log" ) @@ -285,7 +285,7 @@ type BucketStoreConfig struct { IndexCache IndexCacheConfig `yaml:"index_cache"` ChunksCache ChunksCacheConfig `yaml:"chunks_cache"` MetadataCache MetadataCacheConfig `yaml:"metadata_cache"` - ParquetLabelsCache ParquetLabelsCacheConfig `yaml:"parquet_labels_cache" doc:"hidden"` + ParquetLabelsCache ParquetLabelsCacheConfig `yaml:"parquet_labels_cache"` MatchersCacheMaxItems int `yaml:"matchers_cache_max_items"` IgnoreDeletionMarksDelay time.Duration `yaml:"ignore_deletion_mark_delay"` IgnoreBlocksWithin time.Duration `yaml:"ignore_blocks_within"` @@ -409,10 +409,10 @@ func (cfg *BucketStoreConfig) Validate() error { if err != nil { return errors.Wrap(err, "parquet-labels-cache configuration") } - if !util.StringsContain(supportedBlockDiscoveryStrategies, cfg.BlockDiscoveryStrategy) { + if !slices.Contains(supportedBlockDiscoveryStrategies, cfg.BlockDiscoveryStrategy) { return ErrInvalidBucketIndexBlockDiscoveryStrategy } - if !util.StringsContain(supportedTokenBucketBytesLimiterModes, cfg.TokenBucketBytesLimiter.Mode) { + if !slices.Contains(supportedTokenBucketBytesLimiterModes, cfg.TokenBucketBytesLimiter.Mode) { return ErrInvalidTokenBucketBytesLimiterMode } if cfg.LazyExpandedPostingGroupMaxKeySeriesRatio < 0 { diff --git a/pkg/storage/tsdb/config_test.go b/pkg/storage/tsdb/config_test.go index 35f8284d49..7a642cc600 100644 --- a/pkg/storage/tsdb/config_test.go +++ b/pkg/storage/tsdb/config_test.go @@ -148,7 +148,6 @@ func TestConfig_Validate(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { cfg := &BlocksStorageConfig{} diff --git a/pkg/storage/tsdb/expanded_postings_cache_test.go b/pkg/storage/tsdb/expanded_postings_cache_test.go index 333396b52b..abe0447402 100644 --- 
a/pkg/storage/tsdb/expanded_postings_cache_test.go +++ b/pkg/storage/tsdb/expanded_postings_cache_test.go @@ -64,7 +64,7 @@ func Test_ShouldFetchPromiseOnlyOnce(t *testing.T) { return 0, 0, nil } - for i := 0; i < 100; i++ { + for range 100 { go func() { defer wg.Done() cache.getPromiseForKey("key1", fetchFunc) @@ -126,7 +126,7 @@ func TestFifoCacheExpire(t *testing.T) { timeNow := time.Now cache := newFifoCache[int](c.cfg, "test", m, timeNow) - for i := 0; i < numberOfKeys; i++ { + for i := range numberOfKeys { key := RepeatStringIfNeeded(fmt.Sprintf("key%d", i), keySize) p, loaded := cache.getPromiseForKey(key, func() (int, int64, error) { return 1, 8, nil @@ -143,7 +143,7 @@ func TestFifoCacheExpire(t *testing.T) { totalCacheSize := 0 - for i := 0; i < numberOfKeys; i++ { + for i := range numberOfKeys { key := RepeatStringIfNeeded(fmt.Sprintf("key%d", i), keySize) if cache.contains(key) { totalCacheSize++ @@ -167,7 +167,7 @@ func TestFifoCacheExpire(t *testing.T) { return timeNow().Add(2 * c.cfg.Ttl) } - for i := 0; i < numberOfKeys; i++ { + for i := range numberOfKeys { key := RepeatStringIfNeeded(fmt.Sprintf("key%d", i), keySize) originalSize := cache.cachedBytes p, loaded := cache.getPromiseForKey(key, func() (int, int64, error) { @@ -213,10 +213,10 @@ func Test_memHashString(test *testing.T) { numberOfMetrics := 100 occurrences := map[uint64]int{} - for k := 0; k < 10; k++ { - for j := 0; j < numberOfMetrics; j++ { + for range 10 { + for j := range numberOfMetrics { metricName := fmt.Sprintf("metricName%v", j) - for i := 0; i < numberOfTenants; i++ { + for i := range numberOfTenants { userId := fmt.Sprintf("user%v", i) occurrences[memHashString(userId, metricName)]++ } diff --git a/pkg/storage/tsdb/index_cache.go b/pkg/storage/tsdb/index_cache.go index 20200a76ec..7c1011f74a 100644 --- a/pkg/storage/tsdb/index_cache.go +++ b/pkg/storage/tsdb/index_cache.go @@ -3,6 +3,7 @@ package tsdb import ( "flag" "fmt" + "slices" "strings" "time" @@ -14,7 +15,6 @@ import ( "github.com/thanos-io/thanos/pkg/model" storecache "github.com/thanos-io/thanos/pkg/store/cache" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" ) @@ -84,7 +84,7 @@ func (cfg *IndexCacheConfig) Validate() error { } for _, backend := range splitBackends { - if !util.StringsContain(supportedIndexCacheBackends, backend) { + if !slices.Contains(supportedIndexCacheBackends, backend) { return errUnsupportedIndexCacheBackend } @@ -249,10 +249,7 @@ func newInMemoryIndexCache(cfg InMemoryIndexCacheConfig, logger log.Logger, regi maxCacheSize := model.Bytes(cfg.MaxSizeBytes) // Calculate the max item size. 
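// The change below swaps the hand-rolled comparison for the generic min built-in (Go 1.21+); behavior is unchanged:
//
//	maxItemSize := min(defaultMaxItemSize, maxCacheSize)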
- maxItemSize := defaultMaxItemSize - if maxItemSize > maxCacheSize { - maxItemSize = maxCacheSize - } + maxItemSize := min(defaultMaxItemSize, maxCacheSize) return NewInMemoryIndexCacheWithConfig(logger, nil, registerer, storecache.InMemoryIndexCacheConfig{ MaxSize: maxCacheSize, diff --git a/pkg/storage/tsdb/inmemory_index_cache_test.go b/pkg/storage/tsdb/inmemory_index_cache_test.go index 805a3cf42b..297e249ae0 100644 --- a/pkg/storage/tsdb/inmemory_index_cache_test.go +++ b/pkg/storage/tsdb/inmemory_index_cache_test.go @@ -24,7 +24,7 @@ import ( func TestInMemoryIndexCache_UpdateItem(t *testing.T) { var errorLogs []string - errorLogger := log.LoggerFunc(func(kvs ...interface{}) error { + errorLogger := log.LoggerFunc(func(kvs ...any) error { var lvl string for i := 0; i < len(kvs); i += 2 { if kvs[i] == "level" { @@ -135,7 +135,7 @@ func TestInMemoryIndexCacheSetOverflow(t *testing.T) { testutil.Equals(t, float64(0), prom_testutil.ToFloat64(counter)) var sb strings.Builder - for i := 0; i < 100; i++ { + for i := range 100 { sb.WriteString(strconv.Itoa(i)) } // Trigger overflow with a large value. @@ -162,8 +162,7 @@ func BenchmarkInMemoryIndexCacheStore(b *testing.B) { cache, err := newInMemoryIndexCache(cfg, logger, prometheus.NewRegistry()) require.NoError(b, err) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { cache.StoreSeries(blockID, storage.SeriesRef(i), seriesData, tenancy.DefaultTenant) } }) @@ -172,8 +171,7 @@ func BenchmarkInMemoryIndexCacheStore(b *testing.B) { cache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, prometheus.NewRegistry(), storecache.DefaultInMemoryIndexCacheConfig) require.NoError(b, err) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { cache.StoreSeries(blockID, storage.SeriesRef(i), seriesData, tenancy.DefaultTenant) } }) @@ -182,8 +180,8 @@ func BenchmarkInMemoryIndexCacheStore(b *testing.B) { cache, err := newInMemoryIndexCache(cfg, logger, prometheus.NewRegistry()) require.NoError(b, err) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for i := 0; b.Loop(); i++ { cache.StoreSeries(blockID, storage.SeriesRef(i), postingData, tenancy.DefaultTenant) } }) @@ -192,8 +190,7 @@ func BenchmarkInMemoryIndexCacheStore(b *testing.B) { cache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, prometheus.NewRegistry(), storecache.DefaultInMemoryIndexCacheConfig) require.NoError(b, err) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { cache.StoreSeries(blockID, storage.SeriesRef(i), postingData, tenancy.DefaultTenant) } }) @@ -219,9 +216,8 @@ func BenchmarkInMemoryIndexCacheStoreConcurrent(b *testing.B) { require.NoError(b, err) ch := make(chan int) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < 500; i++ { + for range 500 { go func() { for j := range ch { cache.StoreSeries(blockID, storage.SeriesRef(j), seriesData, tenancy.DefaultTenant) @@ -230,7 +226,7 @@ func BenchmarkInMemoryIndexCacheStoreConcurrent(b *testing.B) { }() } - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { ch <- i } close(ch) @@ -241,9 +237,8 @@ func BenchmarkInMemoryIndexCacheStoreConcurrent(b *testing.B) { require.NoError(b, err) ch := make(chan int) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < 500; i++ { + for range 500 { go func() { for j := range ch { cache.StoreSeries(blockID, storage.SeriesRef(j), seriesData, tenancy.DefaultTenant) @@ -252,7 +247,7 @@ func 
BenchmarkInMemoryIndexCacheStoreConcurrent(b *testing.B) { }() } - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { ch <- i } close(ch) @@ -263,9 +258,8 @@ func BenchmarkInMemoryIndexCacheStoreConcurrent(b *testing.B) { require.NoError(b, err) ch := make(chan int) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < 500; i++ { + for range 500 { go func() { for j := range ch { cache.StoreSeries(blockID, storage.SeriesRef(j), postingData, tenancy.DefaultTenant) @@ -274,7 +268,7 @@ func BenchmarkInMemoryIndexCacheStoreConcurrent(b *testing.B) { }() } - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { ch <- i } close(ch) @@ -285,9 +279,8 @@ func BenchmarkInMemoryIndexCacheStoreConcurrent(b *testing.B) { require.NoError(b, err) ch := make(chan int) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < 500; i++ { + for range 500 { go func() { for j := range ch { cache.StoreSeries(blockID, storage.SeriesRef(j), postingData, tenancy.DefaultTenant) @@ -296,7 +289,7 @@ func BenchmarkInMemoryIndexCacheStoreConcurrent(b *testing.B) { }() } - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { ch <- i } close(ch) @@ -317,19 +310,18 @@ func BenchmarkInMemoryIndexCacheFetch(b *testing.B) { ctx := context.Background() items := 10000 ids := make([]storage.SeriesRef, items) - for i := 0; i < items; i++ { + for i := range items { ids[i] = storage.SeriesRef(i) } b.Run("FastCache", func(b *testing.B) { cache, err := newInMemoryIndexCache(cfg, logger, prometheus.NewRegistry()) require.NoError(b, err) - for i := 0; i < items; i++ { + for i := range items { cache.StoreSeries(blockID, storage.SeriesRef(i), seriesData, tenancy.DefaultTenant) } b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { cache.FetchMultiSeries(ctx, blockID, ids, tenancy.DefaultTenant) } }) @@ -337,12 +329,11 @@ func BenchmarkInMemoryIndexCacheFetch(b *testing.B) { b.Run("ThanosCache", func(b *testing.B) { cache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, prometheus.NewRegistry(), storecache.DefaultInMemoryIndexCacheConfig) require.NoError(b, err) - for i := 0; i < items; i++ { + for i := range items { cache.StoreSeries(blockID, storage.SeriesRef(i), seriesData, tenancy.DefaultTenant) } b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { cache.FetchMultiSeries(ctx, blockID, ids, tenancy.DefaultTenant) } }) @@ -362,21 +353,20 @@ func BenchmarkInMemoryIndexCacheFetchConcurrent(b *testing.B) { ctx := context.Background() items := 10000 ids := make([]storage.SeriesRef, items) - for i := 0; i < items; i++ { + for i := range items { ids[i] = storage.SeriesRef(i) } b.Run("FastCache", func(b *testing.B) { cache, err := newInMemoryIndexCache(cfg, logger, prometheus.NewRegistry()) require.NoError(b, err) - for i := 0; i < items; i++ { + for i := range items { cache.StoreSeries(blockID, storage.SeriesRef(i), seriesData, tenancy.DefaultTenant) } b.ReportAllocs() - b.ResetTimer() ch := make(chan int) - for i := 0; i < 500; i++ { + for range 500 { go func() { for range ch { cache.FetchMultiSeries(ctx, blockID, ids, tenancy.DefaultTenant) @@ -384,7 +374,7 @@ func BenchmarkInMemoryIndexCacheFetchConcurrent(b *testing.B) { }() } - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { ch <- i } close(ch) @@ -393,14 +383,13 @@ func BenchmarkInMemoryIndexCacheFetchConcurrent(b *testing.B) { b.Run("ThanosCache", func(b *testing.B) { cache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, prometheus.NewRegistry(), 
storecache.DefaultInMemoryIndexCacheConfig) require.NoError(b, err) - for i := 0; i < items; i++ { + for i := range items { cache.StoreSeries(blockID, storage.SeriesRef(i), seriesData, tenancy.DefaultTenant) } b.ReportAllocs() - b.ResetTimer() ch := make(chan int) - for i := 0; i < 500; i++ { + for range 500 { go func() { for range ch { cache.FetchMultiSeries(ctx, blockID, ids, tenancy.DefaultTenant) @@ -408,7 +397,7 @@ func BenchmarkInMemoryIndexCacheFetchConcurrent(b *testing.B) { }() } - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { ch <- i } close(ch) diff --git a/pkg/storage/tsdb/meta_extensions_test.go b/pkg/storage/tsdb/meta_extensions_test.go index 6f296eb461..f6108170e7 100644 --- a/pkg/storage/tsdb/meta_extensions_test.go +++ b/pkg/storage/tsdb/meta_extensions_test.go @@ -139,7 +139,7 @@ func TestGetPartitionedInfo(t *testing.T) { PartitionedGroupID uint32 `json:"partitionedGroupId"` PartitionCount int `json:"partitionCount"` PartitionID int `json:"partitionId"` - } `json:"partition_info,omitempty"` + } `json:"partition_info"` }{ PartitionInfo: struct { PartitionedGroupID uint32 `json:"partitionedGroupId"` diff --git a/pkg/storage/tsdb/multilevel_bucket_cache.go b/pkg/storage/tsdb/multilevel_bucket_cache.go index 83fcfddce7..f9e2b4fbfd 100644 --- a/pkg/storage/tsdb/multilevel_bucket_cache.go +++ b/pkg/storage/tsdb/multilevel_bucket_cache.go @@ -5,6 +5,7 @@ import ( "errors" "flag" "fmt" + "maps" "time" "github.com/prometheus/client_golang/prometheus" @@ -127,9 +128,7 @@ func (m *multiLevelBucketCache) Fetch(ctx context.Context, keys []string) map[st return nil } if data := c.Fetch(ctx, missingKeys); len(data) > 0 { - for k, d := range data { - hits[k] = d - } + maps.Copy(hits, data) if i > 0 && len(hits) > 0 { // lets fetch only the mising keys @@ -142,9 +141,7 @@ func (m *multiLevelBucketCache) Fetch(ctx context.Context, keys []string) map[st missingKeys = m - for k, b := range hits { - backfillItems[i-1][k] = b - } + maps.Copy(backfillItems[i-1], hits) } if len(hits) == len(keys) { diff --git a/pkg/storage/tsdb/multilevel_index_cache.go b/pkg/storage/tsdb/multilevel_index_cache.go index f46610fc24..bab35f7471 100644 --- a/pkg/storage/tsdb/multilevel_index_cache.go +++ b/pkg/storage/tsdb/multilevel_index_cache.go @@ -3,6 +3,7 @@ package tsdb import ( "context" "errors" + "maps" "slices" "github.com/oklog/ulid/v2" @@ -54,9 +55,7 @@ func (m *multiLevelCache) FetchMultiPostings(ctx context.Context, blockID ulid.U h, mi := c.FetchMultiPostings(ctx, blockID, misses, tenant) misses = mi - for label, bytes := range h { - hits[label] = bytes - } + maps.Copy(hits, h) if i > 0 { backfillItems[i-1] = h @@ -71,8 +70,6 @@ func (m *multiLevelCache) FetchMultiPostings(ctx context.Context, blockID ulid.U backFillTimer := prometheus.NewTimer(m.backFillLatency.WithLabelValues(storecache.CacheTypePostings)) defer backFillTimer.ObserveDuration() for i, values := range backfillItems { - i := i - values := values if len(values) == 0 { continue } @@ -160,9 +157,7 @@ func (m *multiLevelCache) FetchMultiSeries(ctx context.Context, blockID ulid.ULI h, miss := c.FetchMultiSeries(ctx, blockID, misses, tenant) misses = miss - for label, bytes := range h { - hits[label] = bytes - } + maps.Copy(hits, h) if i > 0 && len(h) > 0 { backfillItems[i-1] = h @@ -177,8 +172,6 @@ func (m *multiLevelCache) FetchMultiSeries(ctx context.Context, blockID ulid.ULI backFillTimer := prometheus.NewTimer(m.backFillLatency.WithLabelValues(storecache.CacheTypeSeries)) defer backFillTimer.ObserveDuration() for i, values 
:= range backfillItems { - i := i - values := values if len(values) == 0 { continue } diff --git a/pkg/storage/tsdb/multilevel_index_cache_test.go b/pkg/storage/tsdb/multilevel_index_cache_test.go index 4d05dfaae0..781323b408 100644 --- a/pkg/storage/tsdb/multilevel_index_cache_test.go +++ b/pkg/storage/tsdb/multilevel_index_cache_test.go @@ -159,18 +159,18 @@ func Test_MultiLevelCache(t *testing.T) { v2 := make([]byte, 200) testCases := map[string]struct { - m1ExpectedCalls map[string][][]interface{} - m2ExpectedCalls map[string][][]interface{} - m1MockedCalls map[string][]interface{} - m2MockedCalls map[string][]interface{} + m1ExpectedCalls map[string][][]any + m2ExpectedCalls map[string][][]any + m1MockedCalls map[string][]any + m2MockedCalls map[string][]any enabledItems [][]string call func(storecache.IndexCache) }{ "[StorePostings] Should store on all caches": { - m1ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{ "StorePostings": {{bID, l1, v}}, }, - m2ExpectedCalls: map[string][][]interface{}{ + m2ExpectedCalls: map[string][][]any{ "StorePostings": {{bID, l1, v}}, }, call: func(cache storecache.IndexCache) { @@ -178,8 +178,8 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[StorePostings] Should store on m2 only": { - m1ExpectedCalls: map[string][][]interface{}{}, - m2ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{}, + m2ExpectedCalls: map[string][][]any{ "StorePostings": {{bID, l1, v}}, }, enabledItems: [][]string{ @@ -191,10 +191,10 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[StoreSeries] Should store on all caches": { - m1ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{ "StoreSeries": {{bID, storage.SeriesRef(1), v}}, }, - m2ExpectedCalls: map[string][][]interface{}{ + m2ExpectedCalls: map[string][][]any{ "StoreSeries": {{bID, storage.SeriesRef(1), v}}, }, call: func(cache storecache.IndexCache) { @@ -202,8 +202,8 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[StoreSeries] Should store on m2 only": { - m1ExpectedCalls: map[string][][]interface{}{}, - m2ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{}, + m2ExpectedCalls: map[string][][]any{ "StoreSeries": {{bID, storage.SeriesRef(1), v}}, }, enabledItems: [][]string{ @@ -215,10 +215,10 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[StoreExpandedPostings] Should store on all caches": { - m1ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{ "StoreExpandedPostings": {{bID, []*labels.Matcher{matcher}, v}}, }, - m2ExpectedCalls: map[string][][]interface{}{ + m2ExpectedCalls: map[string][][]any{ "StoreExpandedPostings": {{bID, []*labels.Matcher{matcher}, v}}, }, call: func(cache storecache.IndexCache) { @@ -226,8 +226,8 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[StoreExpandedPostings] Should store on m2 only": { - m1ExpectedCalls: map[string][][]interface{}{}, - m2ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{}, + m2ExpectedCalls: map[string][][]any{ "StoreExpandedPostings": {{bID, []*labels.Matcher{matcher}, v}}, }, enabledItems: [][]string{ @@ -239,10 +239,10 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[FetchMultiPostings] Should fallback when all misses": { - m1ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{ "FetchMultiPostings": {{bID, []labels.Label{l1, l2}}}, }, - m2ExpectedCalls: map[string][][]interface{}{ + m2ExpectedCalls: map[string][][]any{ 
"FetchMultiPostings": {{bID, []labels.Label{l1, l2}}}, }, call: func(cache storecache.IndexCache) { @@ -250,17 +250,17 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[FetchMultiPostings] should fallback and backfill only the missing keys on l1": { - m1ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{ "FetchMultiPostings": {{bID, []labels.Label{l1, l2}}}, "StorePostings": {{bID, l2, v}}, }, - m2ExpectedCalls: map[string][][]interface{}{ + m2ExpectedCalls: map[string][][]any{ "FetchMultiPostings": {{bID, []labels.Label{l2}}}, }, - m1MockedCalls: map[string][]interface{}{ + m1MockedCalls: map[string][]any{ "FetchMultiPostings": {map[labels.Label][]byte{l1: make([]byte, 1)}, []labels.Label{l2}}, }, - m2MockedCalls: map[string][]interface{}{ + m2MockedCalls: map[string][]any{ "FetchMultiPostings": {map[labels.Label][]byte{l2: v}, []labels.Label{}}, }, call: func(cache storecache.IndexCache) { @@ -268,17 +268,17 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[FetchMultiPostings] should fallback and backfill only the missing keys on l1, multiple items": { - m1ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{ "FetchMultiPostings": {{bID, []labels.Label{l1, l2, l3}}}, "StorePostings": {{bID, l2, v}, {bID, l3, v2}}, }, - m2ExpectedCalls: map[string][][]interface{}{ + m2ExpectedCalls: map[string][][]any{ "FetchMultiPostings": {{bID, []labels.Label{l2, l3}}}, }, - m1MockedCalls: map[string][]interface{}{ + m1MockedCalls: map[string][]any{ "FetchMultiPostings": {map[labels.Label][]byte{l1: make([]byte, 1)}, []labels.Label{l2, l3}}, }, - m2MockedCalls: map[string][]interface{}{ + m2MockedCalls: map[string][]any{ "FetchMultiPostings": {map[labels.Label][]byte{l2: v, l3: v2}, []labels.Label{}}, }, call: func(cache storecache.IndexCache) { @@ -286,12 +286,12 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[FetchMultiPostings] m1 doesn't enable postings": { - m1ExpectedCalls: map[string][][]interface{}{}, - m2ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{}, + m2ExpectedCalls: map[string][][]any{ "FetchMultiPostings": {{bID, []labels.Label{l1, l2, l3}}}, }, - m1MockedCalls: map[string][]interface{}{}, - m2MockedCalls: map[string][]interface{}{ + m1MockedCalls: map[string][]any{}, + m2MockedCalls: map[string][]any{ "FetchMultiPostings": {map[labels.Label][]byte{l1: v, l2: v, l3: v2}, []labels.Label{}}, }, enabledItems: [][]string{ @@ -303,11 +303,11 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[FetchMultiPostings] should not fallback when all hit on l1": { - m1ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{ "FetchMultiPostings": {{bID, []labels.Label{l1, l2}}}, }, - m2ExpectedCalls: map[string][][]interface{}{}, - m1MockedCalls: map[string][]interface{}{ + m2ExpectedCalls: map[string][][]any{}, + m1MockedCalls: map[string][]any{ "FetchMultiPostings": {map[labels.Label][]byte{l1: make([]byte, 1), l2: make([]byte, 1)}, []labels.Label{}}, }, call: func(cache storecache.IndexCache) { @@ -315,10 +315,10 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[FetchMultiSeries] Should fallback when all misses": { - m1ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{ "FetchMultiSeries": {{bID, []storage.SeriesRef{1, 2}}}, }, - m2ExpectedCalls: map[string][][]interface{}{ + m2ExpectedCalls: map[string][][]any{ "FetchMultiSeries": {{bID, []storage.SeriesRef{1, 2}}}, }, call: func(cache storecache.IndexCache) { @@ -326,17 +326,17 @@ func 
Test_MultiLevelCache(t *testing.T) { }, }, "[FetchMultiSeries] should fallback and backfill only the missing keys on l1": { - m1ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{ "FetchMultiSeries": {{bID, []storage.SeriesRef{1, 2}}}, "StoreSeries": {{bID, storage.SeriesRef(2), v}}, }, - m2ExpectedCalls: map[string][][]interface{}{ + m2ExpectedCalls: map[string][][]any{ "FetchMultiSeries": {{bID, []storage.SeriesRef{2}}}, }, - m1MockedCalls: map[string][]interface{}{ + m1MockedCalls: map[string][]any{ "FetchMultiSeries": {map[storage.SeriesRef][]byte{1: v}, []storage.SeriesRef{2}}, }, - m2MockedCalls: map[string][]interface{}{ + m2MockedCalls: map[string][]any{ "FetchMultiSeries": {map[storage.SeriesRef][]byte{2: v}, []storage.SeriesRef{}}, }, call: func(cache storecache.IndexCache) { @@ -344,20 +344,20 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[FetchMultiSeries] should fallback and backfill only the missing keys on l1, multiple items": { - m1ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{ "FetchMultiSeries": {{bID, []storage.SeriesRef{1, 2, 3}}}, "StoreSeries": { {bID, storage.SeriesRef(2), v}, {bID, storage.SeriesRef(3), v2}, }, }, - m2ExpectedCalls: map[string][][]interface{}{ + m2ExpectedCalls: map[string][][]any{ "FetchMultiSeries": {{bID, []storage.SeriesRef{2, 3}}}, }, - m1MockedCalls: map[string][]interface{}{ + m1MockedCalls: map[string][]any{ "FetchMultiSeries": {map[storage.SeriesRef][]byte{1: v}, []storage.SeriesRef{2, 3}}, }, - m2MockedCalls: map[string][]interface{}{ + m2MockedCalls: map[string][]any{ "FetchMultiSeries": {map[storage.SeriesRef][]byte{2: v, 3: v2}, []storage.SeriesRef{}}, }, call: func(cache storecache.IndexCache) { @@ -365,12 +365,12 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[FetchMultiPostings] m1 doesn't enable series": { - m1ExpectedCalls: map[string][][]interface{}{}, - m2ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{}, + m2ExpectedCalls: map[string][][]any{ "FetchMultiSeries": {{bID, []storage.SeriesRef{1, 2, 3}}}, }, - m1MockedCalls: map[string][]interface{}{}, - m2MockedCalls: map[string][]interface{}{ + m1MockedCalls: map[string][]any{}, + m2MockedCalls: map[string][]any{ "FetchMultiSeries": {map[storage.SeriesRef][]byte{1: v, 2: v, 3: v2}, []storage.SeriesRef{}}, }, enabledItems: [][]string{ @@ -382,11 +382,11 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[FetchMultiSeries] should not fallback when all hit on l1": { - m1ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{ "FetchMultiSeries": {{bID, []storage.SeriesRef{1, 2}}}, }, - m2ExpectedCalls: map[string][][]interface{}{}, - m1MockedCalls: map[string][]interface{}{ + m2ExpectedCalls: map[string][][]any{}, + m1MockedCalls: map[string][]any{ "FetchMultiSeries": {map[storage.SeriesRef][]byte{1: make([]byte, 1), 2: make([]byte, 1)}, []storage.SeriesRef{}}, }, call: func(cache storecache.IndexCache) { @@ -394,14 +394,14 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[FetchExpandedPostings] Should fallback and backfill when miss": { - m1ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{ "StoreExpandedPostings": {{bID, []*labels.Matcher{matcher}, v}}, "FetchExpandedPostings": {{bID, []*labels.Matcher{matcher}}}, }, - m2ExpectedCalls: map[string][][]interface{}{ + m2ExpectedCalls: map[string][][]any{ "FetchExpandedPostings": {{bID, []*labels.Matcher{matcher}}}, }, - m2MockedCalls: map[string][]interface{}{ + 
m2MockedCalls: map[string][]any{ "FetchExpandedPostings": {v, true}, }, call: func(cache storecache.IndexCache) { @@ -409,11 +409,11 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[FetchExpandedPostings] should not fallback when all hit on l1": { - m1ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{ "FetchExpandedPostings": {{bID, []*labels.Matcher{matcher}}}, }, - m2ExpectedCalls: map[string][][]interface{}{}, - m1MockedCalls: map[string][]interface{}{ + m2ExpectedCalls: map[string][][]any{}, + m1MockedCalls: map[string][]any{ "FetchExpandedPostings": {[]byte{}, true}, }, call: func(cache storecache.IndexCache) { @@ -421,12 +421,12 @@ func Test_MultiLevelCache(t *testing.T) { }, }, "[FetchExpandedPostings] m1 doesn't enable expanded postings": { - m1ExpectedCalls: map[string][][]interface{}{}, - m2ExpectedCalls: map[string][][]interface{}{ + m1ExpectedCalls: map[string][][]any{}, + m2ExpectedCalls: map[string][][]any{ "FetchExpandedPostings": {{bID, []*labels.Matcher{matcher}}}, }, - m1MockedCalls: map[string][]interface{}{}, - m2MockedCalls: map[string][]interface{}{ + m1MockedCalls: map[string][]any{}, + m2MockedCalls: map[string][]any{ "FetchExpandedPostings": {[]byte{}, true}, }, enabledItems: [][]string{ @@ -475,29 +475,29 @@ func Test_MultiLevelCache(t *testing.T) { } } -func newMockIndexCache(mockedCalls map[string][]interface{}) *mockIndexCache { +func newMockIndexCache(mockedCalls map[string][]any) *mockIndexCache { return &mockIndexCache{ - calls: map[string][][]interface{}{}, + calls: map[string][][]any{}, mockedCalls: mockedCalls, } } type mockIndexCache struct { mtx sync.Mutex - calls map[string][][]interface{} - mockedCalls map[string][]interface{} + calls map[string][][]any + mockedCalls map[string][]any } func (m *mockIndexCache) StorePostings(blockID ulid.ULID, l labels.Label, v []byte, tenant string) { m.mtx.Lock() defer m.mtx.Unlock() - m.calls["StorePostings"] = append(m.calls["StorePostings"], []interface{}{blockID, l, v}) + m.calls["StorePostings"] = append(m.calls["StorePostings"], []any{blockID, l, v}) } func (m *mockIndexCache) FetchMultiPostings(_ context.Context, blockID ulid.ULID, keys []labels.Label, tenant string) (hits map[labels.Label][]byte, misses []labels.Label) { m.mtx.Lock() defer m.mtx.Unlock() - m.calls["FetchMultiPostings"] = append(m.calls["FetchMultiPostings"], []interface{}{blockID, keys}) + m.calls["FetchMultiPostings"] = append(m.calls["FetchMultiPostings"], []any{blockID, keys}) if m, ok := m.mockedCalls["FetchMultiPostings"]; ok { return m[0].(map[labels.Label][]byte), m[1].([]labels.Label) } @@ -508,13 +508,13 @@ func (m *mockIndexCache) FetchMultiPostings(_ context.Context, blockID ulid.ULID func (m *mockIndexCache) StoreExpandedPostings(blockID ulid.ULID, matchers []*labels.Matcher, v []byte, tenant string) { m.mtx.Lock() defer m.mtx.Unlock() - m.calls["StoreExpandedPostings"] = append(m.calls["StoreExpandedPostings"], []interface{}{blockID, matchers, v}) + m.calls["StoreExpandedPostings"] = append(m.calls["StoreExpandedPostings"], []any{blockID, matchers, v}) } func (m *mockIndexCache) FetchExpandedPostings(_ context.Context, blockID ulid.ULID, matchers []*labels.Matcher, tenant string) ([]byte, bool) { m.mtx.Lock() defer m.mtx.Unlock() - m.calls["FetchExpandedPostings"] = append(m.calls["FetchExpandedPostings"], []interface{}{blockID, matchers}) + m.calls["FetchExpandedPostings"] = append(m.calls["FetchExpandedPostings"], []any{blockID, matchers}) if m, ok := m.mockedCalls["FetchExpandedPostings"]; ok 
{ return m[0].([]byte), m[1].(bool) } @@ -525,13 +525,13 @@ func (m *mockIndexCache) FetchExpandedPostings(_ context.Context, blockID ulid.U func (m *mockIndexCache) StoreSeries(blockID ulid.ULID, id storage.SeriesRef, v []byte, tenant string) { m.mtx.Lock() defer m.mtx.Unlock() - m.calls["StoreSeries"] = append(m.calls["StoreSeries"], []interface{}{blockID, id, v}) + m.calls["StoreSeries"] = append(m.calls["StoreSeries"], []any{blockID, id, v}) } func (m *mockIndexCache) FetchMultiSeries(_ context.Context, blockID ulid.ULID, ids []storage.SeriesRef, tenant string) (hits map[storage.SeriesRef][]byte, misses []storage.SeriesRef) { m.mtx.Lock() defer m.mtx.Unlock() - m.calls["FetchMultiSeries"] = append(m.calls["FetchMultiSeries"], []interface{}{blockID, ids}) + m.calls["FetchMultiSeries"] = append(m.calls["FetchMultiSeries"], []any{blockID, ids}) if m, ok := m.mockedCalls["FetchMultiSeries"]; ok { return m[0].(map[storage.SeriesRef][]byte), m[1].([]storage.SeriesRef) } diff --git a/pkg/storage/tsdb/testutil/objstore.go b/pkg/storage/tsdb/testutil/objstore.go index d879ab2bb4..0892d19b6f 100644 --- a/pkg/storage/tsdb/testutil/objstore.go +++ b/pkg/storage/tsdb/testutil/objstore.go @@ -4,6 +4,7 @@ import ( "context" "io" "os" + "slices" "strings" "testing" @@ -12,8 +13,6 @@ import ( "github.com/thanos-io/objstore" "go.uber.org/atomic" - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/storage/bucket/filesystem" ) @@ -45,7 +44,7 @@ type MockBucketFailure struct { } func (m *MockBucketFailure) Delete(ctx context.Context, name string) error { - if util.StringsContain(m.DeleteFailures, name) { + if slices.Contains(m.DeleteFailures, name) { return errors.New("mocked delete failure") } return m.Bucket.Delete(ctx, name) @@ -79,7 +78,7 @@ func (m *MockBucketFailure) Get(ctx context.Context, name string) (io.ReadCloser return m.Bucket.Get(ctx, name) } -func (m *MockBucketFailure) Upload(ctx context.Context, name string, r io.Reader) error { +func (m *MockBucketFailure) Upload(ctx context.Context, name string, r io.Reader, opts ...objstore.ObjectUploadOption) error { m.UploadCalls.Add(1) for prefix, err := range m.UploadFailures { if strings.HasPrefix(name, prefix) { @@ -90,7 +89,7 @@ func (m *MockBucketFailure) Upload(ctx context.Context, name string, r io.Reader return e } - return m.Bucket.Upload(ctx, name, r) + return m.Bucket.Upload(ctx, name, r, opts...) 
} func (m *MockBucketFailure) WithExpectedErrs(expectedFunc objstore.IsOpFailureExpectedFunc) objstore.Bucket { diff --git a/pkg/storage/tsdb/users/cache_test.go b/pkg/storage/tsdb/users/cache_test.go index 9b1f0d7d42..6ef5588c92 100644 --- a/pkg/storage/tsdb/users/cache_test.go +++ b/pkg/storage/tsdb/users/cache_test.go @@ -69,7 +69,6 @@ func TestCachedScanner_ScanUsers(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() @@ -132,7 +131,7 @@ func TestCachedScanner_ConcurrentAccess(t *testing.T) { const goroutines = 10 done := make(chan struct{}) - for i := 0; i < goroutines; i++ { + for range goroutines { go func() { defer func() { done <- struct{}{} }() @@ -145,7 +144,7 @@ func TestCachedScanner_ConcurrentAccess(t *testing.T) { } // Wait for all goroutines to complete - for i := 0; i < goroutines; i++ { + for range goroutines { <-done } diff --git a/pkg/storage/tsdb/users/scanner_test.go b/pkg/storage/tsdb/users/scanner_test.go index 433f85d3ae..6f90623734 100644 --- a/pkg/storage/tsdb/users/scanner_test.go +++ b/pkg/storage/tsdb/users/scanner_test.go @@ -74,7 +74,6 @@ func TestListScanner_ScanUsers(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() @@ -187,7 +186,6 @@ func TestUserIndexScanner_ScanUsers(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() @@ -268,7 +266,6 @@ func TestShardedScanner_ScanUsers(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() diff --git a/pkg/storage/tsdb/users/updater_test.go b/pkg/storage/tsdb/users/updater_test.go index 1828597b05..c5273c8e03 100644 --- a/pkg/storage/tsdb/users/updater_test.go +++ b/pkg/storage/tsdb/users/updater_test.go @@ -73,7 +73,6 @@ func TestUserIndexUpdater_UpdateUserIndex(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() bkt, _ := cortex_testutil.PrepareFilesystemBucket(t) diff --git a/pkg/storage/tsdb/users_scanner_config_test.go b/pkg/storage/tsdb/users_scanner_config_test.go index 2abe0451c4..9e6d20a37c 100644 --- a/pkg/storage/tsdb/users_scanner_config_test.go +++ b/pkg/storage/tsdb/users_scanner_config_test.go @@ -97,7 +97,6 @@ func TestUsersScannerConfig_Validate(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() diff --git a/pkg/storegateway/bucket_index_metadata_fetcher_test.go b/pkg/storegateway/bucket_index_metadata_fetcher_test.go index 9a7f7dd562..8bd23eaa44 100644 --- a/pkg/storegateway/bucket_index_metadata_fetcher_test.go +++ b/pkg/storegateway/bucket_index_metadata_fetcher_test.go @@ -86,6 +86,7 @@ func TestBucketIndexMetadataFetcher_Fetch(t *testing.T) { blocks_meta_synced{state="marked-for-no-compact"} 0 blocks_meta_synced{state="no-bucket-index"} 0 blocks_meta_synced{state="no-meta-json"} 0 + blocks_meta_synced{state="parquet-migrated"} 0 blocks_meta_synced{state="time-excluded"} 0 blocks_meta_synced{state="too-fresh"} 0 @@ -134,6 +135,7 @@ func TestBucketIndexMetadataFetcher_Fetch_KeyPermissionDenied(t *testing.T) { blocks_meta_synced{state="marked-for-no-compact"} 0 blocks_meta_synced{state="no-bucket-index"} 0 blocks_meta_synced{state="no-meta-json"} 0 + blocks_meta_synced{state="parquet-migrated"} 0 
blocks_meta_synced{state="time-excluded"} 0 blocks_meta_synced{state="too-fresh"} 0 # HELP blocks_meta_syncs_total Total blocks metadata synchronization attempts @@ -185,6 +187,7 @@ func TestBucketIndexMetadataFetcher_Fetch_NoBucketIndex(t *testing.T) { blocks_meta_synced{state="marked-for-no-compact"} 0 blocks_meta_synced{state="no-bucket-index"} 1 blocks_meta_synced{state="no-meta-json"} 0 + blocks_meta_synced{state="parquet-migrated"} 0 blocks_meta_synced{state="time-excluded"} 0 blocks_meta_synced{state="too-fresh"} 0 @@ -240,6 +243,7 @@ func TestBucketIndexMetadataFetcher_Fetch_CorruptedBucketIndex(t *testing.T) { blocks_meta_synced{state="marked-for-no-compact"} 0 blocks_meta_synced{state="no-bucket-index"} 0 blocks_meta_synced{state="no-meta-json"} 0 + blocks_meta_synced{state="parquet-migrated"} 0 blocks_meta_synced{state="time-excluded"} 0 blocks_meta_synced{state="too-fresh"} 0 @@ -287,6 +291,7 @@ func TestBucketIndexMetadataFetcher_Fetch_ShouldResetGaugeMetrics(t *testing.T) blocks_meta_synced{state="marked-for-no-compact"} 0 blocks_meta_synced{state="no-bucket-index"} 0 blocks_meta_synced{state="no-meta-json"} 0 + blocks_meta_synced{state="parquet-migrated"} 0 blocks_meta_synced{state="time-excluded"} 0 blocks_meta_synced{state="too-fresh"} 0 `), "blocks_meta_synced")) @@ -311,6 +316,7 @@ func TestBucketIndexMetadataFetcher_Fetch_ShouldResetGaugeMetrics(t *testing.T) blocks_meta_synced{state="marked-for-no-compact"} 0 blocks_meta_synced{state="no-bucket-index"} 1 blocks_meta_synced{state="no-meta-json"} 0 + blocks_meta_synced{state="parquet-migrated"} 0 blocks_meta_synced{state="time-excluded"} 0 blocks_meta_synced{state="too-fresh"} 0 `), "blocks_meta_synced")) @@ -343,6 +349,7 @@ func TestBucketIndexMetadataFetcher_Fetch_ShouldResetGaugeMetrics(t *testing.T) blocks_meta_synced{state="marked-for-no-compact"} 0 blocks_meta_synced{state="no-bucket-index"} 0 blocks_meta_synced{state="no-meta-json"} 0 + blocks_meta_synced{state="parquet-migrated"} 0 blocks_meta_synced{state="time-excluded"} 0 blocks_meta_synced{state="too-fresh"} 0 `), "blocks_meta_synced")) @@ -369,6 +376,7 @@ func TestBucketIndexMetadataFetcher_Fetch_ShouldResetGaugeMetrics(t *testing.T) blocks_meta_synced{state="marked-for-no-compact"} 0 blocks_meta_synced{state="no-bucket-index"} 0 blocks_meta_synced{state="no-meta-json"} 0 + blocks_meta_synced{state="parquet-migrated"} 0 blocks_meta_synced{state="time-excluded"} 0 blocks_meta_synced{state="too-fresh"} 0 `), "blocks_meta_synced")) diff --git a/pkg/storegateway/bucket_store_metrics_test.go b/pkg/storegateway/bucket_store_metrics_test.go index ac4ff00df8..2b087b89b3 100644 --- a/pkg/storegateway/bucket_store_metrics_test.go +++ b/pkg/storegateway/bucket_store_metrics_test.go @@ -631,12 +631,11 @@ func benchmarkMetricsCollection(b *testing.B, users int) { mainReg.MustRegister(tsdbMetrics) base := 123456.0 - for i := 0; i < users; i++ { + for i := range users { tsdbMetrics.AddUserRegistry(fmt.Sprintf("user-%d", i), populateMockedBucketStoreMetrics(base*float64(i))) } - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { _, _ = mainReg.Gather() } } diff --git a/pkg/storegateway/bucket_stores_test.go b/pkg/storegateway/bucket_stores_test.go index 69c018ccfa..831b7afb2b 100644 --- a/pkg/storegateway/bucket_stores_test.go +++ b/pkg/storegateway/bucket_stores_test.go @@ -10,6 +10,7 @@ import ( "os" "path" "path/filepath" + "slices" "sort" "strings" "testing" @@ -470,7 +471,6 @@ func TestBucketStores_scanUsers(t *testing.T) { } for testName, testData := range 
tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() @@ -659,6 +659,7 @@ func TestBucketStores_SyncBlocksWithIgnoreBlocksBefore(t *testing.T) { cortex_blocks_meta_synced{state="marked-for-deletion"} 0 cortex_blocks_meta_synced{state="marked-for-no-compact"} 0 cortex_blocks_meta_synced{state="no-meta-json"} 0 + cortex_blocks_meta_synced{state="parquet-migrated"} 0 cortex_blocks_meta_synced{state="time-excluded"} 1 cortex_blocks_meta_synced{state="too-fresh"} 0 # HELP cortex_blocks_meta_syncs_total Total blocks metadata synchronization attempts @@ -701,7 +702,7 @@ func generateStorageBlock(t *testing.T, storageDir, userID string, metricName st require.NoError(t, db.Close()) }() - series := labels.Labels{labels.Label{Name: labels.MetricName, Value: metricName}} + series := labels.FromStrings(labels.MetricName, metricName) app := db.Appender(context.Background()) for ts := minT; ts < maxT; ts += int64(step) { @@ -996,7 +997,7 @@ func (u *userShardingStrategy) FilterUsers(ctx context.Context, userIDs []string } func (u *userShardingStrategy) FilterBlocks(ctx context.Context, userID string, metas map[ulid.ULID]*thanos_metadata.Meta, loaded map[ulid.ULID]struct{}, synced block.GaugeVec) error { - if util.StringsContain(u.users, userID) { + if slices.Contains(u.users, userID) { return nil } @@ -1007,7 +1008,7 @@ func (u *userShardingStrategy) FilterBlocks(ctx context.Context, userID string, } func (u *userShardingStrategy) OwnBlock(userID string, _ thanos_metadata.Meta) (bool, error) { - if util.StringsContain(u.users, userID) { + if slices.Contains(u.users, userID) { return true, nil } diff --git a/pkg/storegateway/gateway.go b/pkg/storegateway/gateway.go index 9e61d63abf..96eb8c31cc 100644 --- a/pkg/storegateway/gateway.go +++ b/pkg/storegateway/gateway.go @@ -5,6 +5,7 @@ import ( "flag" "fmt" "net/http" + "slices" "strings" "time" @@ -16,7 +17,6 @@ import ( "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/extprom" "github.com/thanos-io/thanos/pkg/store/storepb" - "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/logging" "github.com/cortexproject/cortex/pkg/configs" @@ -89,7 +89,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { // Validate the Config. func (cfg *Config) Validate(limits validation.Limits, monitoredResources flagext.StringSliceCSV) error { if cfg.ShardingEnabled { - if !util.StringsContain(supportedShardingStrategies, cfg.ShardingStrategy) { + if !slices.Contains(supportedShardingStrategies, cfg.ShardingStrategy) { return errInvalidShardingStrategy } @@ -437,7 +437,7 @@ func (g *StoreGateway) checkResourceUtilization() error { if err := g.resourceBasedLimiter.AcceptNewRequest(); err != nil { level.Warn(g.logger).Log("msg", "failed to accept request", "err", err) - return httpgrpc.Errorf(http.StatusServiceUnavailable, "failed to query: %s", util_limiter.ErrResourceLimitReachedStr) + return util_limiter.ErrResourceLimitReached } return nil diff --git a/pkg/storegateway/gateway_ring.go b/pkg/storegateway/gateway_ring.go index fc39f80b42..798d1221a2 100644 --- a/pkg/storegateway/gateway_ring.go +++ b/pkg/storegateway/gateway_ring.go @@ -68,6 +68,7 @@ type RingConfig struct { ZoneAwarenessEnabled bool `yaml:"zone_awareness_enabled"` KeepInstanceInTheRingOnShutdown bool `yaml:"keep_instance_in_the_ring_on_shutdown"` ZoneStableShuffleSharding bool `yaml:"zone_stable_shuffle_sharding" doc:"hidden"` + DetailedMetricsEnabled bool `yaml:"detailed_metrics_enabled"` // Wait ring stability. 
WaitStabilityMinDuration time.Duration `yaml:"wait_stability_min_duration"` @@ -107,6 +108,7 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.ZoneAwarenessEnabled, ringFlagsPrefix+"zone-awareness-enabled", false, "True to enable zone-awareness and replicate blocks across different availability zones.") f.BoolVar(&cfg.KeepInstanceInTheRingOnShutdown, ringFlagsPrefix+"keep-instance-in-the-ring-on-shutdown", false, "True to keep the store gateway instance in the ring when it shuts down. The instance will then be auto-forgotten from the ring after 10*heartbeat_timeout.") f.BoolVar(&cfg.ZoneStableShuffleSharding, ringFlagsPrefix+"zone-stable-shuffle-sharding", true, "If true, use zone stable shuffle sharding algorithm. Otherwise, use the default shuffle sharding algorithm.") + f.BoolVar(&cfg.DetailedMetricsEnabled, ringFlagsPrefix+"detailed-metrics-enabled", true, "Set to true to enable ring detailed metrics. These metrics provide detailed information, such as token count and ownership per tenant. Disabling them can significantly decrease the number of metrics emitted.") // Wait stability flags. f.DurationVar(&cfg.WaitStabilityMinDuration, ringFlagsPrefix+"wait-stability-min-duration", time.Minute, "Minimum time to wait for ring stability at startup. 0 to disable.") @@ -138,6 +140,7 @@ func (cfg *RingConfig) ToRingConfig() ring.Config { rc.ReplicationFactor = cfg.ReplicationFactor rc.ZoneAwarenessEnabled = cfg.ZoneAwarenessEnabled rc.SubringCacheDisabled = true + rc.DetailedMetricsEnabled = cfg.DetailedMetricsEnabled return rc } diff --git a/pkg/storegateway/gateway_ring_test.go b/pkg/storegateway/gateway_ring_test.go index c00f227f8a..6142fd131a 100644 --- a/pkg/storegateway/gateway_ring_test.go +++ b/pkg/storegateway/gateway_ring_test.go @@ -57,7 +57,6 @@ func TestIsHealthyForStoreGatewayOperations(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() diff --git a/pkg/storegateway/gateway_test.go b/pkg/storegateway/gateway_test.go index 57bccae5fe..6dcc8cd991 100644 --- a/pkg/storegateway/gateway_test.go +++ b/pkg/storegateway/gateway_test.go @@ -84,7 +84,6 @@ func TestConfig_Validate(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() cfg := &Config{} @@ -131,7 +130,6 @@ func TestStoreGateway_InitialSyncWithDefaultShardingEnabled(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() ctx := context.Background() @@ -146,7 +144,7 @@ func TestStoreGateway_InitialSyncWithDefaultShardingEnabled(t *testing.T) { // Setup the initial instance state in the ring. if testData.initialExists { - require.NoError(t, ringStore.CAS(ctx, RingKey, func(in interface{}) (interface{}, bool, error) { + require.NoError(t, ringStore.CAS(ctx, RingKey, func(in any) (any, bool, error) { ringDesc := ring.GetOrCreateRingDesc(in) ringDesc.AddIngester(gatewayCfg.ShardingRing.InstanceID, gatewayCfg.ShardingRing.InstanceAddr, "", testData.initialTokens, testData.initialState, time.Now()) return ringDesc, true, nil @@ -532,7 +530,7 @@ func TestStoreGateway_BlocksSyncWithDefaultSharding_RingTopologyChangedAfterScal // store-gateways behaves with regards to blocks syncing while other replicas are JOINING. // Wait until all the initial store-gateways see all new store-gateways too.
- test.Poll(t, 5*time.Second, float64(numAllGateways*numInitialGateways), func() interface{} { + test.Poll(t, 5*time.Second, float64(numAllGateways*numInitialGateways), func() any { metrics := initialRegistries.BuildMetricFamiliesPerUser() return metrics.GetSumOfGauges("cortex_ring_members") }) @@ -568,7 +566,7 @@ func TestStoreGateway_BlocksSyncWithDefaultSharding_RingTopologyChangedAfterScal // At this point the new store-gateways are expected to be ACTIVE in the ring and all the initial // store-gateways should unload blocks they don't own anymore. - test.Poll(t, 5*time.Second, float64(expectedBlocksLoaded), func() interface{} { + test.Poll(t, 5*time.Second, float64(expectedBlocksLoaded), func() any { metrics := allRegistries.BuildMetricFamiliesPerUser() return metrics.GetSumOfGauges("cortex_bucket_store_blocks_loaded") }) @@ -596,7 +594,6 @@ func TestStoreGateway_ShouldSupportLoadRingTokensFromFile(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() tokensFile, err := os.CreateTemp(os.TempDir(), "tokens-*") @@ -812,7 +809,6 @@ func TestStoreGateway_SyncOnRingTopologyChanged(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() ctx := context.Background() @@ -835,7 +831,7 @@ func TestStoreGateway_SyncOnRingTopologyChanged(t *testing.T) { require.NoError(t, err) // Store the initial ring state before starting the gateway. - require.NoError(t, ringStore.CAS(ctx, RingKey, func(in interface{}) (interface{}, bool, error) { + require.NoError(t, ringStore.CAS(ctx, RingKey, func(in any) (any, bool, error) { ringDesc := ring.GetOrCreateRingDesc(in) testData.setupRing(ringDesc) return ringDesc, true, nil @@ -851,7 +847,7 @@ func TestStoreGateway_SyncOnRingTopologyChanged(t *testing.T) { assert.Equal(t, float64(1), metrics.GetSumOfCounters("cortex_storegateway_bucket_sync_total")) // Change the ring topology. - require.NoError(t, ringStore.CAS(ctx, RingKey, func(in interface{}) (interface{}, bool, error) { + require.NoError(t, ringStore.CAS(ctx, RingKey, func(in any) (any, bool, error) { ringDesc := ring.GetOrCreateRingDesc(in) testData.updateRing(ringDesc) return ringDesc, true, nil @@ -859,7 +855,7 @@ func TestStoreGateway_SyncOnRingTopologyChanged(t *testing.T) { // Assert whether the sync triggered or not. if testData.expectedSync { - test.Poll(t, time.Second, float64(2), func() interface{} { + test.Poll(t, time.Second, float64(2), func() any { metrics := regs.BuildMetricFamiliesPerUser() return metrics.GetSumOfCounters("cortex_storegateway_bucket_sync_total") }) @@ -900,7 +896,7 @@ func TestStoreGateway_RingLifecyclerShouldAutoForgetUnhealthyInstances(t *testin defer services.StopAndAwaitTerminated(ctx, g) //nolint:errcheck // Add an unhealthy instance to the ring. - require.NoError(t, ringStore.CAS(ctx, RingKey, func(in interface{}) (interface{}, bool, error) { + require.NoError(t, ringStore.CAS(ctx, RingKey, func(in any) (any, bool, error) { ringDesc := ring.GetOrCreateRingDesc(in) tg := ring.NewRandomTokenGenerator() instance := ringDesc.AddIngester(unhealthyInstanceID, "1.1.1.1", "", tg.GenerateTokens(ringDesc, unhealthyInstanceID, "", RingNumTokens, true), ring.ACTIVE, time.Now()) @@ -911,7 +907,7 @@ func TestStoreGateway_RingLifecyclerShouldAutoForgetUnhealthyInstances(t *testin })) // Ensure the unhealthy instance is removed from the ring. 
- test.Poll(t, time.Second, false, func() interface{} { + test.Poll(t, time.Second, false, func() any { d, err := ringStore.Get(ctx, RingKey) if err != nil { return err @@ -969,7 +965,6 @@ func TestStoreGateway_SeriesQueryingShouldRemoveExternalLabels(t *testing.T) { } for _, bucketIndexEnabled := range []bool{true, false} { - bucketIndexEnabled := bucketIndexEnabled t.Run(fmt.Sprintf("bucket index enabled = %v", bucketIndexEnabled), func(t *testing.T) { t.Parallel() // Create a store-gateway used to query back the series from the blocks. @@ -998,7 +993,7 @@ func TestStoreGateway_SeriesQueryingShouldRemoveExternalLabels(t *testing.T) { assert.Empty(t, srv.Warnings) assert.Len(t, srv.SeriesSet, numSeries) - for seriesID := 0; seriesID < numSeries; seriesID++ { + for seriesID := range numSeries { actual := srv.SeriesSet[seriesID] // Ensure Cortex external labels have been removed. @@ -1239,8 +1234,9 @@ func TestStoreGateway_SeriesThrottledByResourceMonitor(t *testing.T) { srv := newBucketStoreSeriesServer(setUserIDToGRPCContext(ctx, userID)) err = g.Series(req, srv) require.Error(t, err) - exhaustedErr := util_limiter.ResourceLimitReachedError{} - require.ErrorContains(t, err, exhaustedErr.Error()) + + // Expected error from isRetryableError in blocks_store_queryable.go + require.ErrorIs(t, err, util_limiter.ErrResourceLimitReached) } func mockGatewayConfig() Config { @@ -1299,7 +1295,7 @@ func mockTSDB(t *testing.T, dir string, numSeries, numBlocks int, minT, maxT int step := (maxT - minT) / int64(numSeries) ctx := context.Background() addSample := func(i int) { - lbls := labels.Labels{labels.Label{Name: "series_id", Value: strconv.Itoa(i)}} + lbls := labels.FromStrings("series_id", strconv.Itoa(i)) app := db.Appender(ctx) _, err := app.Append(0, lbls, minT+(step*int64(i)), float64(i)) @@ -1315,7 +1311,7 @@ func mockTSDB(t *testing.T, dir string, numSeries, numBlocks int, minT, maxT int i++ } } else { - for i := 0; i < numSeries; i++ { + for i := range numSeries { addSample(i) } } diff --git a/pkg/storegateway/partitioner.go b/pkg/storegateway/partitioner.go index 816a45d8a5..a7b6477ec1 100644 --- a/pkg/storegateway/partitioner.go +++ b/pkg/storegateway/partitioner.go @@ -41,7 +41,7 @@ func newGapBasedPartitioner(maxGapBytes uint64, reg prometheus.Registerer) *gapB func (p *gapBasedPartitioner) Partition(length int, rng func(int) (uint64, uint64)) []store.Part { // Calculate the size of requested ranges. requestedBytes := uint64(0) - for i := 0; i < length; i++ { + for i := range length { start, end := rng(i) requestedBytes += end - start } diff --git a/pkg/storegateway/sharding_strategy_test.go b/pkg/storegateway/sharding_strategy_test.go index 4f9cfe10f1..2f2e002e09 100644 --- a/pkg/storegateway/sharding_strategy_test.go +++ b/pkg/storegateway/sharding_strategy_test.go @@ -242,8 +242,6 @@ func TestDefaultShardingStrategy(t *testing.T) { } for testName, testData := range tests { - testName := testName - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() @@ -253,7 +251,7 @@ func TestDefaultShardingStrategy(t *testing.T) { t.Cleanup(func() { assert.NoError(t, closer.Close()) }) // Initialize the ring state. 
- require.NoError(t, store.CAS(ctx, "test", func(in interface{}) (interface{}, bool, error) { + require.NoError(t, store.CAS(ctx, "test", func(in any) (any, bool, error) { d := ring.NewDesc() testData.setupRing(d) return d, true, nil @@ -620,9 +618,6 @@ func TestShuffleShardingStrategy(t *testing.T) { for testName, testData := range tests { for _, zoneStableShuffleSharding := range []bool{false, true} { - testName := testName - testData := testData - t.Run(fmt.Sprintf("%s %s", testName, strconv.FormatBool(zoneStableShuffleSharding)), func(t *testing.T) { t.Parallel() @@ -631,7 +626,7 @@ func TestShuffleShardingStrategy(t *testing.T) { t.Cleanup(func() { assert.NoError(t, closer.Close()) }) // Initialize the ring state. - require.NoError(t, store.CAS(ctx, "test", func(in interface{}) (interface{}, bool, error) { + require.NoError(t, store.CAS(ctx, "test", func(in any) (any, bool, error) { d := ring.NewDesc() testData.setupRing(d) return d, true, nil @@ -722,7 +717,7 @@ func TestDefaultShardingStrategy_OwnBlock(t *testing.T) { t.Cleanup(func() { assert.NoError(t, closer.Close()) }) // Initialize the ring state. - require.NoError(t, store.CAS(ctx, "test", func(in interface{}) (interface{}, bool, error) { + require.NoError(t, store.CAS(ctx, "test", func(in any) (any, bool, error) { d := ring.NewDesc() d.AddIngester("instance-1", "127.0.0.1", "zone-a", []uint32{block1Hash + 1}, ring.ACTIVE, registeredAt) d.AddIngester("instance-2", "127.0.0.2", "zone-b", []uint32{block2Hash + 1}, ring.ACTIVE, registeredAt) @@ -772,7 +767,7 @@ func TestShuffleShardingStrategy_OwnBlock(t *testing.T) { t.Cleanup(func() { assert.NoError(t, closer.Close()) }) // Initialize the ring state. - require.NoError(t, store.CAS(ctx, "test", func(in interface{}) (interface{}, bool, error) { + require.NoError(t, store.CAS(ctx, "test", func(in any) (any, bool, error) { d := ring.NewDesc() d.AddIngester("instance-1", "127.0.0.1", "zone-a", []uint32{block1Hash + 1}, ring.ACTIVE, registeredAt) d.AddIngester("instance-2", "127.0.0.2", "zone-b", []uint32{block2Hash + 1}, ring.ACTIVE, registeredAt) diff --git a/pkg/tracing/migration/bridge_wrapper.go b/pkg/tracing/migration/bridge_wrapper.go index 5a17cc7fec..96cc5abc4d 100644 --- a/pkg/tracing/migration/bridge_wrapper.go +++ b/pkg/tracing/migration/bridge_wrapper.go @@ -20,7 +20,7 @@ func (b *CortexBridgeTracerWrapper) StartSpan(operationName string, opts ...open return b.bt.StartSpan(operationName, opts...) 
} -func (b *CortexBridgeTracerWrapper) Inject(sm opentracing.SpanContext, format interface{}, carrier interface{}) error { +func (b *CortexBridgeTracerWrapper) Inject(sm opentracing.SpanContext, format any, carrier any) error { builtinFormat, ok := format.(opentracing.BuiltinFormat) if !ok { @@ -57,7 +57,7 @@ func (b *CortexBridgeTracerWrapper) Inject(sm opentracing.SpanContext, format in } } -func (b *CortexBridgeTracerWrapper) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) { +func (b *CortexBridgeTracerWrapper) Extract(format any, carrier any) (opentracing.SpanContext, error) { builtinFormat, ok := format.(opentracing.BuiltinFormat) if !ok { diff --git a/pkg/tracing/migration/bridge_wrapper_test.go b/pkg/tracing/migration/bridge_wrapper_test.go index ea3375958c..54eb9cdf90 100644 --- a/pkg/tracing/migration/bridge_wrapper_test.go +++ b/pkg/tracing/migration/bridge_wrapper_test.go @@ -75,8 +75,8 @@ func (p *mockPropagator) Extract(ctx context.Context, carrier propagation.TextMa func TestCortexBridgeTracerWrapper_Inject(t *testing.T) { tests := []struct { name string - format interface{} - carrier interface{} + format any + carrier any wantedValues map[string]string }{ { diff --git a/pkg/util/active_user_test.go b/pkg/util/active_user_test.go index 4db9e7b0cd..60e97f6dbc 100644 --- a/pkg/util/active_user_test.go +++ b/pkg/util/active_user_test.go @@ -61,7 +61,7 @@ func TestActiveUserConcurrentUpdateAndPurge(t *testing.T) { latestTS := atomic.NewInt64(0) - for j := 0; j < count; j++ { + for range count { done.Add(1) go func() { @@ -79,7 +79,7 @@ func TestActiveUserConcurrentUpdateAndPurge(t *testing.T) { } previousLatest := int64(0) - for i := 0; i < 10; i++ { + for range 10 { time.Sleep(100 * time.Millisecond) latest := latestTS.Load() @@ -110,7 +110,7 @@ func BenchmarkActiveUsers_UpdateUserTimestamp(b *testing.B) { startGoroutinesDoingUpdates(b, c, as) - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { as.UpdateUserTimestamp("test", int64(i)) } }) @@ -124,7 +124,7 @@ func BenchmarkActiveUsers_Purge(b *testing.B) { startGoroutinesDoingUpdates(b, c, as) - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { as.PurgeInactiveUsers(int64(i)) } }) @@ -136,7 +136,7 @@ func startGoroutinesDoingUpdates(b *testing.B, count int, as *ActiveUsers) { stop := atomic.NewBool(false) started := sync.WaitGroup{} - for j := 0; j < count; j++ { + for j := range count { done.Add(1) started.Add(1) userID := fmt.Sprintf("user-%d", j) diff --git a/pkg/util/api/response.go b/pkg/util/api/response.go index c58baf60b9..74b4074212 100644 --- a/pkg/util/api/response.go +++ b/pkg/util/api/response.go @@ -19,7 +19,7 @@ const ( // Response defines the Prometheus response format. 
type Response struct { Status string `json:"status"` - Data interface{} `json:"data,omitempty"` + Data any `json:"data,omitempty"` ErrorType v1.ErrorType `json:"errorType,omitempty"` Error string `json:"error,omitempty"` Warnings []string `json:"warnings,omitempty"` } diff --git a/pkg/util/backoff/backoff_test.go b/pkg/util/backoff/backoff_test.go index dff6432c06..942cebb6a4 100644 --- a/pkg/util/backoff/backoff_test.go +++ b/pkg/util/backoff/backoff_test.go @@ -80,7 +80,6 @@ func TestBackoff_NextDelay(t *testing.T) { } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { t.Parallel() diff --git a/pkg/util/concurrency/runner.go b/pkg/util/concurrency/runner.go index 8f6d180c77..df9b5e37a1 100644 --- a/pkg/util/concurrency/runner.go +++ b/pkg/util/concurrency/runner.go @@ -30,7 +30,7 @@ func ForEachUser(ctx context.Context, userIDs []string, concurrency int, userFun wg := sync.WaitGroup{} routines := min(concurrency, len(userIDs)) - for ix := 0; ix < routines; ix++ { + for range routines { wg.Add(1) go func() { defer wg.Done() @@ -62,13 +62,13 @@ func ForEachUser(ctx context.Context, userIDs []string, concurrency int, userFun // ForEach runs the provided jobFunc for each job up to concurrency concurrent workers. // The execution breaks on first error encountered. -func ForEach(ctx context.Context, jobs []interface{}, concurrency int, jobFunc func(ctx context.Context, job interface{}) error) error { +func ForEach(ctx context.Context, jobs []any, concurrency int, jobFunc func(ctx context.Context, job any) error) error { if len(jobs) == 0 { return nil } // Push all jobs to a channel. - ch := make(chan interface{}, len(jobs)) + ch := make(chan any, len(jobs)) for _, job := range jobs { ch <- job } @@ -77,7 +77,7 @@ func ForEach(ctx context.Context, jobs []interface{}, concurrency int, jobFunc f // Start workers to process jobs. g, ctx := errgroup.WithContext(ctx) routines := min(concurrency, len(jobs)) - for ix := 0; ix < routines; ix++ { + for range routines { g.Go(func() error { for job := range ch { if err := ctx.Err(); err != nil { @@ -98,9 +98,9 @@ func ForEach(ctx context.Context, jobs []interface{}, concurrency int, jobFunc f } // CreateJobsFromStrings is a utility to create jobs from a slice of strings.
-func CreateJobsFromStrings(values []string) []interface{} { - jobs := make([]interface{}, len(values)) - for i := 0; i < len(values); i++ { +func CreateJobsFromStrings(values []string) []any { + jobs := make([]any, len(values)) + for i := range values { jobs[i] = values[i] } return jobs diff --git a/pkg/util/concurrency/runner_test.go b/pkg/util/concurrency/runner_test.go index 54b171d5b1..75439268a2 100644 --- a/pkg/util/concurrency/runner_test.go +++ b/pkg/util/concurrency/runner_test.go @@ -83,7 +83,7 @@ func TestForEach(t *testing.T) { jobs := []string{"a", "b", "c"} - err := ForEach(ctx, CreateJobsFromStrings(jobs), 2, func(ctx context.Context, job interface{}) error { + err := ForEach(ctx, CreateJobsFromStrings(jobs), 2, func(ctx context.Context, job any) error { processedMx.Lock() defer processedMx.Unlock() processed = append(processed, job.(string)) @@ -102,7 +102,7 @@ func TestForEach_ShouldBreakOnFirstError_ContextCancellationHandled(t *testing.T processed atomic.Int32 ) - err := ForEach(ctx, []interface{}{"a", "b", "c"}, 2, func(ctx context.Context, job interface{}) error { + err := ForEach(ctx, []any{"a", "b", "c"}, 2, func(ctx context.Context, job any) error { if processed.CompareAndSwap(0, 1) { return errors.New("the first request is failing") } @@ -137,7 +137,7 @@ func TestForEach_ShouldBreakOnFirstError_ContextCancellationUnhandled(t *testing var wg sync.WaitGroup wg.Add(2) - err := ForEach(ctx, []interface{}{"a", "b", "c"}, 2, func(ctx context.Context, job interface{}) error { + err := ForEach(ctx, []any{"a", "b", "c"}, 2, func(ctx context.Context, job any) error { wg.Done() if processed.CompareAndSwap(0, 1) { @@ -162,7 +162,7 @@ func TestForEach_ShouldBreakOnFirstError_ContextCancellationUnhandled(t *testing } func TestForEach_ShouldReturnImmediatelyOnNoJobsProvided(t *testing.T) { - require.NoError(t, ForEach(context.Background(), nil, 2, func(ctx context.Context, job interface{}) error { + require.NoError(t, ForEach(context.Background(), nil, 2, func(ctx context.Context, job any) error { return nil })) } diff --git a/pkg/util/config.go b/pkg/util/config.go index e1032d0f6f..9bf1c7184f 100644 --- a/pkg/util/config.go +++ b/pkg/util/config.go @@ -6,8 +6,8 @@ import ( ) // DiffConfig utility function that returns the diff between two config map objects -func DiffConfig(defaultConfig, actualConfig map[interface{}]interface{}) (map[interface{}]interface{}, error) { - output := make(map[interface{}]interface{}) +func DiffConfig(defaultConfig, actualConfig map[any]any) (map[any]any, error) { + output := make(map[any]any) for key, value := range actualConfig { @@ -33,8 +33,8 @@ func DiffConfig(defaultConfig, actualConfig map[interface{}]interface{}) (map[in if !ok || defaultV != v { output[key] = v } - case []interface{}: - defaultV, ok := defaultValue.([]interface{}) + case []any: + defaultV, ok := defaultValue.([]any) if !ok || !reflect.DeepEqual(defaultV, v) { output[key] = v } @@ -47,8 +47,8 @@ func DiffConfig(defaultConfig, actualConfig map[interface{}]interface{}) (map[in if defaultValue != nil { output[key] = v } - case map[interface{}]interface{}: - defaultV, ok := defaultValue.(map[interface{}]interface{}) + case map[any]any: + defaultV, ok := defaultValue.(map[any]any) if !ok { output[key] = value } diff --git a/pkg/util/discardedseries/perlabelset_tracker.go b/pkg/util/discardedseries/perlabelset_tracker.go new file mode 100644 index 0000000000..d209811781 --- /dev/null +++ b/pkg/util/discardedseries/perlabelset_tracker.go @@ -0,0 +1,141 @@ +package discardedseries 
+ +import ( + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +// TODO: if we change the per-labelset series limit from one reason to many, we can remove the hardcoded reason and add an extra reason map +const ( + perLabelsetSeriesLimit = "per_labelset_series_limit" +) + +type labelsetCounterStruct struct { + *sync.RWMutex + labelsetSeriesMap map[uint64]*seriesCounterStruct +} + +type DiscardedSeriesPerLabelsetTracker struct { + *sync.RWMutex + userLabelsetMap map[string]*labelsetCounterStruct + discardedSeriesPerLabelsetGauge *prometheus.GaugeVec +} + +func NewDiscardedSeriesPerLabelsetTracker(discardedSeriesPerLabelsetGauge *prometheus.GaugeVec) *DiscardedSeriesPerLabelsetTracker { + tracker := &DiscardedSeriesPerLabelsetTracker{ + RWMutex: &sync.RWMutex{}, + userLabelsetMap: make(map[string]*labelsetCounterStruct), + discardedSeriesPerLabelsetGauge: discardedSeriesPerLabelsetGauge, + } + return tracker +} + +func (t *DiscardedSeriesPerLabelsetTracker) Track(user string, series uint64, matchedLabelsetHash uint64, matchedLabelsetId string) { + t.RLock() + labelsetCounter, ok := t.userLabelsetMap[user] + t.RUnlock() + if !ok { + t.Lock() + labelsetCounter, ok = t.userLabelsetMap[user] + if !ok { + labelsetCounter = &labelsetCounterStruct{ + RWMutex: &sync.RWMutex{}, + labelsetSeriesMap: make(map[uint64]*seriesCounterStruct), + } + t.userLabelsetMap[user] = labelsetCounter + } + t.Unlock() + } + + labelsetCounter.RLock() + seriesCounter, ok := labelsetCounter.labelsetSeriesMap[matchedLabelsetHash] + labelsetCounter.RUnlock() + if !ok { + labelsetCounter.Lock() + seriesCounter, ok = labelsetCounter.labelsetSeriesMap[matchedLabelsetHash] + if !ok { + seriesCounter = &seriesCounterStruct{ + RWMutex: &sync.RWMutex{}, + seriesCountMap: make(map[uint64]struct{}), + labelsetId: matchedLabelsetId, + } + labelsetCounter.labelsetSeriesMap[matchedLabelsetHash] = seriesCounter + } + labelsetCounter.Unlock() + } + + seriesCounter.RLock() + _, ok = seriesCounter.seriesCountMap[series] + seriesCounter.RUnlock() + if !ok { + seriesCounter.Lock() + _, ok = seriesCounter.seriesCountMap[series] + if !ok { + seriesCounter.seriesCountMap[series] = struct{}{} + } + seriesCounter.Unlock() + } +} + +func (t *DiscardedSeriesPerLabelsetTracker) UpdateMetrics() { + usersToDelete := make([]string, 0) + t.RLock() + for user, labelsetCounter := range t.userLabelsetMap { + labelsetsToDelete := make([]uint64, 0) // reset per user so hashes collected for one user are not deleted from another user's map + labelsetCounter.RLock() + if len(labelsetCounter.labelsetSeriesMap) == 0 { + usersToDelete = append(usersToDelete, user) + } + for labelsetHash, seriesCounter := range labelsetCounter.labelsetSeriesMap { + seriesCounter.Lock() + count := len(seriesCounter.seriesCountMap) + t.discardedSeriesPerLabelsetGauge.WithLabelValues(perLabelsetSeriesLimit, user, seriesCounter.labelsetId).Set(float64(count)) + clear(seriesCounter.seriesCountMap) + if count == 0 { + labelsetsToDelete = append(labelsetsToDelete, labelsetHash) + } + seriesCounter.Unlock() + } + labelsetCounter.RUnlock() + if len(labelsetsToDelete) > 0 { + labelsetCounter.Lock() + for _, labelsetHash := range labelsetsToDelete { + if _, ok := labelsetCounter.labelsetSeriesMap[labelsetHash]; ok { + labelsetId := labelsetCounter.labelsetSeriesMap[labelsetHash].labelsetId + t.discardedSeriesPerLabelsetGauge.DeleteLabelValues(perLabelsetSeriesLimit, user, labelsetId) + delete(labelsetCounter.labelsetSeriesMap, labelsetHash) + } + } + labelsetCounter.Unlock() + } + } + t.RUnlock() + if len(usersToDelete) > 0 { + t.Lock() + for _, user
:= range usersToDelete { + delete(t.userLabelsetMap, user) + } + t.Unlock() + } +} + +func (t *DiscardedSeriesPerLabelsetTracker) StartVendDiscardedSeriesMetricGoroutine() { + go func() { + ticker := time.NewTicker(vendMetricsInterval) + for range ticker.C { + t.UpdateMetrics() + } + }() +} + +// only used in testing +func (t *DiscardedSeriesPerLabelsetTracker) getSeriesCount(user string, labelsetLimitHash uint64) int { + if labelsetCounter, ok := t.userLabelsetMap[user]; ok { + if seriesCounter, ok := labelsetCounter.labelsetSeriesMap[labelsetLimitHash]; ok { + return len(seriesCounter.seriesCountMap) + } + } + return 0 +} diff --git a/pkg/util/discardedseries/perlabelset_tracker_test.go b/pkg/util/discardedseries/perlabelset_tracker_test.go new file mode 100644 index 0000000000..849f987fb1 --- /dev/null +++ b/pkg/util/discardedseries/perlabelset_tracker_test.go @@ -0,0 +1,118 @@ +package discardedseries + +import ( + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/prometheus/model/labels" + "github.com/stretchr/testify/require" +) + +func TestPerLabelsetDiscardedSeriesTracker(t *testing.T) { + gauge := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "cortex_discarded_series_per_labelset", + Help: "The number of series that include discarded samples for each labelset.", + }, + []string{"reason", "user", "labelset"}, + ) + + tracker := NewDiscardedSeriesPerLabelsetTracker(gauge) + user1 := "user1" + user2 := "user2" + series1 := labels.FromStrings("__name__", "1") + series2 := labels.FromStrings("__name__", "2") + labelset1 := uint64(10) + labelset2 := uint64(20) + labelset3 := uint64(30) + labelsetId1 := "ten" + labelsetId2 := "twenty" + labelsetId3 := "thirty" + + tracker.Track(user1, series1.Hash(), labelset1, labelsetId1) + tracker.Track(user1, series1.Hash(), labelset2, labelsetId2) + + tracker.Track(user2, series1.Hash(), labelset1, labelsetId1) + tracker.Track(user2, series1.Hash(), labelset1, labelsetId1) + tracker.Track(user2, series1.Hash(), labelset1, labelsetId1) + tracker.Track(user2, series2.Hash(), labelset1, labelsetId1) + + require.Equal(t, tracker.getSeriesCount(user1, labelset1), 1) + require.Equal(t, tracker.getSeriesCount(user1, labelset2), 1) + require.Equal(t, tracker.getSeriesCount(user1, labelset3), 0) + + comparePerLabelsetSeriesVendedCount(t, gauge, user1, labelsetId1, 0) + comparePerLabelsetSeriesVendedCount(t, gauge, user1, labelsetId2, 0) + comparePerLabelsetSeriesVendedCount(t, gauge, user1, labelsetId3, 0) + + require.Equal(t, tracker.getSeriesCount(user2, labelset1), 2) + require.Equal(t, tracker.getSeriesCount(user2, labelset2), 0) + require.Equal(t, tracker.getSeriesCount(user2, labelset3), 0) + + comparePerLabelsetSeriesVendedCount(t, gauge, user2, labelsetId1, 0) + comparePerLabelsetSeriesVendedCount(t, gauge, user2, labelsetId2, 0) + comparePerLabelsetSeriesVendedCount(t, gauge, user2, labelsetId3, 0) + + tracker.UpdateMetrics() + + tracker.Track(user1, series1.Hash(), labelset1, labelsetId1) + tracker.Track(user1, series1.Hash(), labelset1, labelsetId1) + + require.Equal(t, tracker.getSeriesCount(user1, labelset1), 1) + require.Equal(t, tracker.getSeriesCount(user1, labelset2), 0) + require.Equal(t, tracker.getSeriesCount(user1, labelset3), 0) + + comparePerLabelsetSeriesVendedCount(t, gauge, user1, labelsetId1, 1) + comparePerLabelsetSeriesVendedCount(t, gauge, user1, labelsetId2, 1) + comparePerLabelsetSeriesVendedCount(t, gauge, user1, 
labelsetId3, 0) + + require.Equal(t, tracker.getSeriesCount(user2, labelset1), 0) + require.Equal(t, tracker.getSeriesCount(user2, labelset2), 0) + require.Equal(t, tracker.getSeriesCount(user2, labelset3), 0) + + comparePerLabelsetSeriesVendedCount(t, gauge, user2, labelsetId1, 2) + comparePerLabelsetSeriesVendedCount(t, gauge, user2, labelsetId2, 0) + comparePerLabelsetSeriesVendedCount(t, gauge, user2, labelsetId3, 0) + + tracker.UpdateMetrics() + + require.Equal(t, tracker.getSeriesCount(user1, labelset1), 0) + require.Equal(t, tracker.getSeriesCount(user1, labelset2), 0) + require.Equal(t, tracker.getSeriesCount(user1, labelset3), 0) + + comparePerLabelsetSeriesVendedCount(t, gauge, user1, labelsetId1, 1) + comparePerLabelsetSeriesVendedCount(t, gauge, user1, labelsetId2, 0) + comparePerLabelsetSeriesVendedCount(t, gauge, user1, labelsetId3, 0) + + require.Equal(t, tracker.getSeriesCount(user2, labelset1), 0) + require.Equal(t, tracker.getSeriesCount(user2, labelset2), 0) + require.Equal(t, tracker.getSeriesCount(user2, labelset3), 0) + + comparePerLabelsetSeriesVendedCount(t, gauge, user2, labelsetId1, 0) + comparePerLabelsetSeriesVendedCount(t, gauge, user2, labelsetId2, 0) + comparePerLabelsetSeriesVendedCount(t, gauge, user2, labelsetId3, 0) + + tracker.UpdateMetrics() + + require.Equal(t, tracker.getSeriesCount(user1, labelset1), 0) + require.Equal(t, tracker.getSeriesCount(user1, labelset2), 0) + require.Equal(t, tracker.getSeriesCount(user1, labelset3), 0) + + comparePerLabelsetSeriesVendedCount(t, gauge, user1, labelsetId1, 0) + comparePerLabelsetSeriesVendedCount(t, gauge, user1, labelsetId2, 0) + comparePerLabelsetSeriesVendedCount(t, gauge, user1, labelsetId3, 0) + + require.Equal(t, tracker.getSeriesCount(user2, labelset1), 0) + require.Equal(t, tracker.getSeriesCount(user2, labelset2), 0) + require.Equal(t, tracker.getSeriesCount(user2, labelset3), 0) + + comparePerLabelsetSeriesVendedCount(t, gauge, user2, labelsetId1, 0) + comparePerLabelsetSeriesVendedCount(t, gauge, user2, labelsetId2, 0) + comparePerLabelsetSeriesVendedCount(t, gauge, user2, labelsetId3, 0) +} + +func comparePerLabelsetSeriesVendedCount(t *testing.T, gaugeVec *prometheus.GaugeVec, user string, labelsetLimitId string, val int) { + gauge, _ := gaugeVec.GetMetricWithLabelValues("per_labelset_series_limit", user, labelsetLimitId) + require.Equal(t, testutil.ToFloat64(gauge), float64(val)) +} diff --git a/pkg/util/discardedseries/tracker.go b/pkg/util/discardedseries/tracker.go new file mode 100644 index 0000000000..82f6f33c6d --- /dev/null +++ b/pkg/util/discardedseries/tracker.go @@ -0,0 +1,133 @@ +package discardedseries + +import ( + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + vendMetricsInterval = 30 * time.Second +) + +type seriesCounterStruct struct { + *sync.RWMutex + seriesCountMap map[uint64]struct{} + labelsetId string +} + +type userCounterStruct struct { + *sync.RWMutex + userSeriesMap map[string]*seriesCounterStruct +} + +type DiscardedSeriesTracker struct { + *sync.RWMutex + reasonUserMap map[string]*userCounterStruct + discardedSeriesGauge *prometheus.GaugeVec +} + +func NewDiscardedSeriesTracker(discardedSeriesGauge *prometheus.GaugeVec) *DiscardedSeriesTracker { + tracker := &DiscardedSeriesTracker{ + RWMutex: &sync.RWMutex{}, + reasonUserMap: make(map[string]*userCounterStruct), + discardedSeriesGauge: discardedSeriesGauge, + } + return tracker +} + +func (t *DiscardedSeriesTracker) Track(reason string, user string, series uint64) { + t.RLock() + 
userCounter, ok := t.reasonUserMap[reason] + t.RUnlock() + if !ok { + t.Lock() + userCounter, ok = t.reasonUserMap[reason] + if !ok { + userCounter = &userCounterStruct{ + RWMutex: &sync.RWMutex{}, + userSeriesMap: make(map[string]*seriesCounterStruct), + } + t.reasonUserMap[reason] = userCounter + } + t.Unlock() + } + + userCounter.RLock() + seriesCounter, ok := userCounter.userSeriesMap[user] + userCounter.RUnlock() + if !ok { + userCounter.Lock() + seriesCounter, ok = userCounter.userSeriesMap[user] + if !ok { + seriesCounter = &seriesCounterStruct{ + RWMutex: &sync.RWMutex{}, + seriesCountMap: make(map[uint64]struct{}), + } + userCounter.userSeriesMap[user] = seriesCounter + } + userCounter.Unlock() + } + + seriesCounter.RLock() + _, ok = seriesCounter.seriesCountMap[series] + seriesCounter.RUnlock() + if !ok { + seriesCounter.Lock() + _, ok = seriesCounter.seriesCountMap[series] + if !ok { + seriesCounter.seriesCountMap[series] = struct{}{} + } + seriesCounter.Unlock() + } +} + +func (t *DiscardedSeriesTracker) UpdateMetrics() { + t.RLock() + for reason, userCounter := range t.reasonUserMap { + usersToDelete := make([]string, 0) // reset per reason so users zeroed under one reason are not deleted from another reason's map + userCounter.RLock() + for user, seriesCounter := range userCounter.userSeriesMap { + seriesCounter.Lock() + count := len(seriesCounter.seriesCountMap) + t.discardedSeriesGauge.WithLabelValues(reason, user).Set(float64(count)) + clear(seriesCounter.seriesCountMap) + if count == 0 { + usersToDelete = append(usersToDelete, user) + } + seriesCounter.Unlock() + } + userCounter.RUnlock() + if len(usersToDelete) > 0 { + userCounter.Lock() + for _, user := range usersToDelete { + if _, ok := userCounter.userSeriesMap[user]; ok { + t.discardedSeriesGauge.DeleteLabelValues(reason, user) + delete(userCounter.userSeriesMap, user) + } + } + userCounter.Unlock() + } + } + t.RUnlock() +} + +func (t *DiscardedSeriesTracker) StartVendDiscardedSeriesMetricGoroutine() { + go func() { + ticker := time.NewTicker(vendMetricsInterval) + for range ticker.C { + t.UpdateMetrics() + } + }() +} + +// only used in testing +func (t *DiscardedSeriesTracker) getSeriesCount(reason string, user string) int { + if userCounter, ok := t.reasonUserMap[reason]; ok { + if seriesCounter, ok := userCounter.userSeriesMap[user]; ok { + return len(seriesCounter.seriesCountMap) + } + } + return 0 +} diff --git a/pkg/util/discardedseries/tracker_test.go b/pkg/util/discardedseries/tracker_test.go new file mode 100644 index 0000000000..8893907a09 --- /dev/null +++ b/pkg/util/discardedseries/tracker_test.go @@ -0,0 +1,115 @@ +package discardedseries + +import ( + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/prometheus/model/labels" + "github.com/stretchr/testify/require" +) + +func TestDiscardedSeriesTracker(t *testing.T) { + gauge := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "cortex_discarded_series", + Help: "The number of series that include discarded samples.", + }, + []string{"reason", "user"}, + ) + + tracker := NewDiscardedSeriesTracker(gauge) + reason1 := "sample_out_of_bounds" + reason2 := "label_2" + reason3 := "unused_label" + user1 := "user1" + user2 := "user2" + series1 := labels.FromStrings("__name__", "1") + series2 := labels.FromStrings("__name__", "2") + + tracker.Track(reason1, user1, series1.Hash()) + tracker.Track(reason2, user1, series1.Hash()) + + tracker.Track(reason1, user2, series1.Hash()) + tracker.Track(reason1, user2, series1.Hash()) + tracker.Track(reason1, user2,
series1.Hash()) + tracker.Track(reason1, user2, series2.Hash()) + + require.Equal(t, tracker.getSeriesCount(reason1, user1), 1) + require.Equal(t, tracker.getSeriesCount(reason2, user1), 1) + require.Equal(t, tracker.getSeriesCount(reason3, user1), 0) + + compareSeriesVendedCount(t, gauge, reason1, user1, 0) + compareSeriesVendedCount(t, gauge, reason2, user1, 0) + compareSeriesVendedCount(t, gauge, reason3, user1, 0) + + require.Equal(t, tracker.getSeriesCount(reason1, user2), 2) + require.Equal(t, tracker.getSeriesCount(reason2, user2), 0) + require.Equal(t, tracker.getSeriesCount(reason3, user2), 0) + + compareSeriesVendedCount(t, gauge, reason1, user2, 0) + compareSeriesVendedCount(t, gauge, reason2, user2, 0) + compareSeriesVendedCount(t, gauge, reason3, user2, 0) + + tracker.UpdateMetrics() + + tracker.Track(reason1, user1, series1.Hash()) + tracker.Track(reason1, user1, series1.Hash()) + + require.Equal(t, tracker.getSeriesCount(reason1, user1), 1) + require.Equal(t, tracker.getSeriesCount(reason2, user1), 0) + require.Equal(t, tracker.getSeriesCount(reason3, user1), 0) + + compareSeriesVendedCount(t, gauge, reason1, user1, 1) + compareSeriesVendedCount(t, gauge, reason2, user1, 1) + compareSeriesVendedCount(t, gauge, reason3, user1, 0) + + require.Equal(t, tracker.getSeriesCount(reason1, user2), 0) + require.Equal(t, tracker.getSeriesCount(reason2, user2), 0) + require.Equal(t, tracker.getSeriesCount(reason3, user2), 0) + + compareSeriesVendedCount(t, gauge, reason1, user2, 2) + compareSeriesVendedCount(t, gauge, reason2, user2, 0) + compareSeriesVendedCount(t, gauge, reason3, user2, 0) + + tracker.UpdateMetrics() + + require.Equal(t, tracker.getSeriesCount(reason1, user1), 0) + require.Equal(t, tracker.getSeriesCount(reason2, user1), 0) + require.Equal(t, tracker.getSeriesCount(reason3, user1), 0) + + compareSeriesVendedCount(t, gauge, reason1, user1, 1) + compareSeriesVendedCount(t, gauge, reason2, user1, 0) + compareSeriesVendedCount(t, gauge, reason3, user1, 0) + + require.Equal(t, tracker.getSeriesCount(reason1, user2), 0) + require.Equal(t, tracker.getSeriesCount(reason2, user2), 0) + require.Equal(t, tracker.getSeriesCount(reason3, user2), 0) + + compareSeriesVendedCount(t, gauge, reason1, user2, 0) + compareSeriesVendedCount(t, gauge, reason2, user2, 0) + compareSeriesVendedCount(t, gauge, reason3, user2, 0) + + tracker.UpdateMetrics() + + require.Equal(t, tracker.getSeriesCount(reason1, user1), 0) + require.Equal(t, tracker.getSeriesCount(reason2, user1), 0) + require.Equal(t, tracker.getSeriesCount(reason3, user1), 0) + + compareSeriesVendedCount(t, gauge, reason1, user1, 0) + compareSeriesVendedCount(t, gauge, reason2, user1, 0) + compareSeriesVendedCount(t, gauge, reason3, user1, 0) + + require.Equal(t, tracker.getSeriesCount(reason1, user2), 0) + require.Equal(t, tracker.getSeriesCount(reason2, user2), 0) + require.Equal(t, tracker.getSeriesCount(reason3, user2), 0) + + compareSeriesVendedCount(t, gauge, reason1, user2, 0) + compareSeriesVendedCount(t, gauge, reason2, user2, 0) + compareSeriesVendedCount(t, gauge, reason3, user2, 0) +} + +func compareSeriesVendedCount(t *testing.T, gaugeVec *prometheus.GaugeVec, reason string, user string, val int) { + gauge, _ := gaugeVec.GetMetricWithLabelValues(reason, user) + require.Equal(t, testutil.ToFloat64(gauge), float64(val)) +} diff --git a/pkg/util/events.go b/pkg/util/events.go index 312f437148..07453ad19b 100644 --- a/pkg/util/events.go +++ b/pkg/util/events.go @@ -13,8 +13,8 @@ import ( var ( // interface{} vars to 
avoid allocation on every call - key interface{} = "level" // masquerade as a level like debug, warn - event interface{} = "event" + key any = "level" // masquerade as a level like debug, warn + event any = "event" eventLogger = log.NewNopLogger() ) @@ -46,7 +46,7 @@ type samplingFilter struct { count atomic.Int64 } -func (e *samplingFilter) Log(keyvals ...interface{}) error { +func (e *samplingFilter) Log(keyvals ...any) error { count := e.count.Inc() if count%int64(e.freq) == 0 { return e.next.Log(keyvals...) diff --git a/pkg/util/fakeauth/fake_auth.go b/pkg/util/fakeauth/fake_auth.go index 92207983dc..c4f538a583 100644 --- a/pkg/util/fakeauth/fake_auth.go +++ b/pkg/util/fakeauth/fake_auth.go @@ -20,7 +20,7 @@ func SetupAuthMiddleware(config *server.Config, enabled bool, noGRPCAuthOn []str ignoredMethods[m] = true } - config.GRPCMiddleware = append(config.GRPCMiddleware, func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + config.GRPCMiddleware = append(config.GRPCMiddleware, func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { if ignoredMethods[info.FullMethod] { return handler(ctx, req) } @@ -28,7 +28,7 @@ func SetupAuthMiddleware(config *server.Config, enabled bool, noGRPCAuthOn []str }) config.GRPCStreamMiddleware = append(config.GRPCStreamMiddleware, - func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { if ignoredMethods[info.FullMethod] { return handler(srv, ss) } @@ -55,12 +55,12 @@ var fakeHTTPAuthMiddleware = middleware.Func(func(next http.Handler) http.Handle }) }) -var fakeGRPCAuthUniaryMiddleware = func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { +var fakeGRPCAuthUniaryMiddleware = func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { ctx = user.InjectOrgID(ctx, "fake") return handler(ctx, req) } -var fakeGRPCAuthStreamMiddleware = func(srv interface{}, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { +var fakeGRPCAuthStreamMiddleware = func(srv any, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { ctx := user.InjectOrgID(ss.Context(), "fake") return handler(srv, serverStream{ ctx: ctx, diff --git a/pkg/util/flagext/cidr.go b/pkg/util/flagext/cidr.go index 72b93b680c..bb7a19c537 100644 --- a/pkg/util/flagext/cidr.go +++ b/pkg/util/flagext/cidr.go @@ -46,9 +46,9 @@ func (c CIDRSliceCSV) String() string { // Set implements flag.Value func (c *CIDRSliceCSV) Set(s string) error { - parts := strings.Split(s, ",") + parts := strings.SplitSeq(s, ",") - for _, part := range parts { + for part := range parts { cidr := &CIDR{} if err := cidr.Set(part); err != nil { return errors.Wrapf(err, "cidr: %s", part) @@ -61,7 +61,7 @@ func (c *CIDRSliceCSV) Set(s string) error { } // UnmarshalYAML implements yaml.Unmarshaler. -func (c *CIDRSliceCSV) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *CIDRSliceCSV) UnmarshalYAML(unmarshal func(any) error) error { var s string if err := unmarshal(&s); err != nil { return err @@ -77,6 +77,6 @@ func (c *CIDRSliceCSV) UnmarshalYAML(unmarshal func(interface{}) error) error { } // MarshalYAML implements yaml.Marshaler. 
-func (c CIDRSliceCSV) MarshalYAML() (interface{}, error) { +func (c CIDRSliceCSV) MarshalYAML() (any, error) { return c.String(), nil } diff --git a/pkg/util/flagext/day.go b/pkg/util/flagext/day.go index 9db695c832..30aa897af6 100644 --- a/pkg/util/flagext/day.go +++ b/pkg/util/flagext/day.go @@ -45,7 +45,7 @@ func (v *DayValue) IsSet() bool { } // UnmarshalYAML implements yaml.Unmarshaler. -func (v *DayValue) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (v *DayValue) UnmarshalYAML(unmarshal func(any) error) error { var s string if err := unmarshal(&s); err != nil { return err @@ -54,6 +54,6 @@ func (v *DayValue) UnmarshalYAML(unmarshal func(interface{}) error) error { } // MarshalYAML implements yaml.Marshaler. -func (v DayValue) MarshalYAML() (interface{}, error) { +func (v DayValue) MarshalYAML() (any, error) { return v.Time.Time().UTC().Format("2006-01-02"), nil } diff --git a/pkg/util/flagext/secret.go b/pkg/util/flagext/secret.go index aa7101b149..e588b4a24a 100644 --- a/pkg/util/flagext/secret.go +++ b/pkg/util/flagext/secret.go @@ -16,7 +16,7 @@ func (v *Secret) Set(s string) error { } // UnmarshalYAML implements yaml.Unmarshaler. -func (v *Secret) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (v *Secret) UnmarshalYAML(unmarshal func(any) error) error { var s string if err := unmarshal(&s); err != nil { return err @@ -26,7 +26,7 @@ func (v *Secret) UnmarshalYAML(unmarshal func(interface{}) error) error { } // MarshalYAML implements yaml.Marshaler. -func (v Secret) MarshalYAML() (interface{}, error) { +func (v Secret) MarshalYAML() (any, error) { if len(v.Value) == 0 { return "", nil } diff --git a/pkg/util/flagext/stringslicecsv.go b/pkg/util/flagext/stringslicecsv.go index 47ccd54ca0..1f1aff6f1c 100644 --- a/pkg/util/flagext/stringslicecsv.go +++ b/pkg/util/flagext/stringslicecsv.go @@ -18,7 +18,7 @@ func (v *StringSliceCSV) Set(s string) error { } // UnmarshalYAML implements yaml.Unmarshaler. -func (v *StringSliceCSV) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (v *StringSliceCSV) UnmarshalYAML(unmarshal func(any) error) error { var s string if err := unmarshal(&s); err != nil { return err @@ -28,6 +28,6 @@ func (v *StringSliceCSV) UnmarshalYAML(unmarshal func(interface{}) error) error } // MarshalYAML implements yaml.Marshaler. -func (v StringSliceCSV) MarshalYAML() (interface{}, error) { +func (v StringSliceCSV) MarshalYAML() (any, error) { return v.String(), nil } diff --git a/pkg/util/flagext/time.go b/pkg/util/flagext/time.go index 452857e9de..c00d0b7d2b 100644 --- a/pkg/util/flagext/time.go +++ b/pkg/util/flagext/time.go @@ -46,7 +46,7 @@ func (t *Time) Set(s string) error { } // UnmarshalYAML implements yaml.Unmarshaler. -func (t *Time) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (t *Time) UnmarshalYAML(unmarshal func(any) error) error { var s string if err := unmarshal(&s); err != nil { return err @@ -55,6 +55,6 @@ func (t *Time) UnmarshalYAML(unmarshal func(interface{}) error) error { } // MarshalYAML implements yaml.Marshaler. -func (t Time) MarshalYAML() (interface{}, error) { +func (t Time) MarshalYAML() (any, error) { return t.String(), nil } diff --git a/pkg/util/flagext/url.go b/pkg/util/flagext/url.go index 3b3b8303be..338a0fb870 100644 --- a/pkg/util/flagext/url.go +++ b/pkg/util/flagext/url.go @@ -26,7 +26,7 @@ func (v *URLValue) Set(s string) error { } // UnmarshalYAML implements yaml.Unmarshaler. 
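The flagext types touched here (CIDRSliceCSV, DayValue, Secret, StringSliceCSV, Time, URLValue) all share one YAML convention: UnmarshalYAML decodes into a string and delegates to the flag's Set method, and MarshalYAML returns the string form. A self-contained sketch of that pattern using a hypothetical Celsius flag type (not part of this change):

package main

import (
	"fmt"
	"strconv"

	"gopkg.in/yaml.v2"
)

type Celsius float64

// Set implements flag.Value.
func (c *Celsius) Set(s string) error {
	f, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return err
	}
	*c = Celsius(f)
	return nil
}

// UnmarshalYAML implements yaml.Unmarshaler by decoding a string and
// reusing Set, mirroring the flagext convention above.
func (c *Celsius) UnmarshalYAML(unmarshal func(any) error) error {
	var s string
	if err := unmarshal(&s); err != nil {
		return err
	}
	return c.Set(s)
}

// MarshalYAML implements yaml.Marshaler.
func (c Celsius) MarshalYAML() (any, error) {
	return strconv.FormatFloat(float64(c), 'f', -1, 64), nil
}

func main() {
	var c Celsius
	if err := yaml.Unmarshal([]byte(`"21.5"`), &c); err != nil {
		panic(err)
	}
	fmt.Println(c) // 21.5
}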
-func (v *URLValue) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (v *URLValue) UnmarshalYAML(unmarshal func(any) error) error { var s string if err := unmarshal(&s); err != nil { return err @@ -42,7 +42,7 @@ func (v *URLValue) UnmarshalYAML(unmarshal func(interface{}) error) error { } // MarshalYAML implements yaml.Marshaler. -func (v URLValue) MarshalYAML() (interface{}, error) { +func (v URLValue) MarshalYAML() (any, error) { if v.URL == nil { return "", nil } diff --git a/pkg/util/grpcclient/backoff_retry.go b/pkg/util/grpcclient/backoff_retry.go index 525497e6bf..c50fffeeee 100644 --- a/pkg/util/grpcclient/backoff_retry.go +++ b/pkg/util/grpcclient/backoff_retry.go @@ -12,7 +12,7 @@ import ( // NewBackoffRetry gRPC middleware. func NewBackoffRetry(cfg backoff.Config) grpc.UnaryClientInterceptor { - return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { backoff := backoff.New(ctx, cfg) for backoff.Ongoing() { err := invoker(ctx, method, req, reply, cc, opts...) diff --git a/pkg/util/grpcclient/health_check_test.go b/pkg/util/grpcclient/health_check_test.go index 7d2b37c37c..bede9bdab1 100644 --- a/pkg/util/grpcclient/health_check_test.go +++ b/pkg/util/grpcclient/health_check_test.go @@ -76,17 +76,17 @@ func TestNewHealthCheckService(t *testing.T) { // Generate healthcheck error and wait instance to become unhealthy hMock.err.Store(errors.New("some error")) - cortex_testutil.Poll(t, 5*time.Second, false, func() interface{} { + cortex_testutil.Poll(t, 5*time.Second, false, func() any { return instances[0].isHealthy() }) // Mark instance back to a healthy state hMock.err.Store(nil) - cortex_testutil.Poll(t, 5*time.Second, true, func() interface{} { + cortex_testutil.Poll(t, 5*time.Second, true, func() any { return instances[0].isHealthy() }) - cortex_testutil.Poll(t, i.instanceGcTimeout*2, 0, func() interface{} { + cortex_testutil.Poll(t, i.instanceGcTimeout*2, 0, func() any { return len(i.registeredInstances()) }) @@ -137,7 +137,7 @@ func TestNewHealthCheckInterceptors(t *testing.T) { require.NoError(t, i.iteration(context.Background())) require.False(t, hMock.open.Load()) - cortex_testutil.Poll(t, time.Second, true, func() interface{} { + cortex_testutil.Poll(t, time.Second, true, func() any { err := ui(context.Background(), "", struct{}{}, struct{}{}, ccUnhealthy, invoker) return errors.Is(err, unhealthyErr) || status.Code(err) == codes.Unavailable }) @@ -148,7 +148,7 @@ func TestNewHealthCheckInterceptors(t *testing.T) { // Should mark the instance back to healthy hMock.err.Store(nil) require.NoError(t, i.iteration(context.Background())) - cortex_testutil.Poll(t, time.Second, true, func() interface{} { + cortex_testutil.Poll(t, time.Second, true, func() any { return ui(context.Background(), "", struct{}{}, struct{}{}, ccUnhealthy, invoker) == nil }) } diff --git a/pkg/util/grpcclient/ratelimit.go b/pkg/util/grpcclient/ratelimit.go index 59ba3b7f08..09ee645b22 100644 --- a/pkg/util/grpcclient/ratelimit.go +++ b/pkg/util/grpcclient/ratelimit.go @@ -16,7 +16,7 @@ func NewRateLimiter(cfg *Config) grpc.UnaryClientInterceptor { burst = int(cfg.RateLimit) } limiter := rate.NewLimiter(rate.Limit(cfg.RateLimit), burst) - return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker 
grpc.UnaryInvoker, opts ...grpc.CallOption) error { + return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { err := limiter.Wait(ctx) if err != nil { return status.Error(codes.ResourceExhausted, err.Error()) diff --git a/pkg/util/grpcclient/ratelimit_test.go b/pkg/util/grpcclient/ratelimit_test.go index 6a8d6345b9..a4f704b7e9 100644 --- a/pkg/util/grpcclient/ratelimit_test.go +++ b/pkg/util/grpcclient/ratelimit_test.go @@ -18,7 +18,7 @@ func TestRateLimiterFailureResultsInResourceExhaustedError(t *testing.T) { RateLimit: 0, } conn := grpc.ClientConn{} - invoker := func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error { + invoker := func(currentCtx context.Context, currentMethod string, currentReq, currentRepl any, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error { return nil } diff --git a/pkg/util/grpcclient/signing_handler.go b/pkg/util/grpcclient/signing_handler.go index d5b7803f28..c402c963aa 100644 --- a/pkg/util/grpcclient/signing_handler.go +++ b/pkg/util/grpcclient/signing_handler.go @@ -27,7 +27,7 @@ type SignRequest interface { VerifySign(context.Context, string) (bool, error) } -func UnarySigningServerInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { +func UnarySigningServerInterceptor(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { rs, ok := req.(SignRequest) if !ok { return handler(ctx, req) @@ -58,7 +58,7 @@ func UnarySigningServerInterceptor(ctx context.Context, req interface{}, _ *grpc return handler(ctx, req) } -func UnarySigningClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { +func UnarySigningClientInterceptor(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { rs, ok := req.(SignRequest) if !ok { diff --git a/pkg/util/grpcclient/signing_handler_test.go b/pkg/util/grpcclient/signing_handler_test.go index 4682b34a45..07193055a0 100644 --- a/pkg/util/grpcclient/signing_handler_test.go +++ b/pkg/util/grpcclient/signing_handler_test.go @@ -18,7 +18,7 @@ func TestUnarySigningHandler(t *testing.T) { w := &cortexpb.WriteRequest{} // Sign Request - err := UnarySigningClientInterceptor(ctx, "", w, w, nil, func(c context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, opts ...grpc.CallOption) error { + err := UnarySigningClientInterceptor(ctx, "", w, w, nil, func(c context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error { ctx = c return nil }) @@ -34,14 +34,14 @@ func TestUnarySigningHandler(t *testing.T) { ctx = metadata.NewIncomingContext(ctx, md) // Verify signature on the server side - _, err = UnarySigningServerInterceptor(ctx, w, nil, func(ctx context.Context, req interface{}) (interface{}, error) { + _, err = UnarySigningServerInterceptor(ctx, w, nil, func(ctx context.Context, req any) (any, error) { return nil, nil }) require.NoError(t, err) // Change user id and make sure the request signature mismatch ctx = user.InjectOrgID(ctx, "user-2") - _, err = UnarySigningServerInterceptor(ctx, w, nil, func(ctx context.Context, req interface{}) (interface{}, error) { + _, err = 
UnarySigningServerInterceptor(ctx, w, nil, func(ctx context.Context, req any) (any, error) { return nil, nil }) @@ -50,7 +50,7 @@ func TestUnarySigningHandler(t *testing.T) { // Return error when signature is not present ctx = user.InjectOrgID(context.Background(), "user-") - _, err = UnarySigningServerInterceptor(ctx, w, nil, func(ctx context.Context, req interface{}) (interface{}, error) { + _, err = UnarySigningServerInterceptor(ctx, w, nil, func(ctx context.Context, req any) (any, error) { return nil, nil }) @@ -59,7 +59,7 @@ func TestUnarySigningHandler(t *testing.T) { // Return error when multiples signatures are present md[reqSignHeaderName] = append(md[reqSignHeaderName], "sig1", "sig2") ctx = metadata.NewOutgoingContext(ctx, md) - err = UnarySigningClientInterceptor(ctx, "", w, w, nil, func(c context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, opts ...grpc.CallOption) error { + err = UnarySigningClientInterceptor(ctx, "", w, w, nil, func(c context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error { ctx = c return nil }) diff --git a/pkg/util/grpcclient/unwrap.go b/pkg/util/grpcclient/unwrap.go index be82fdcd14..ee1e0129da 100644 --- a/pkg/util/grpcclient/unwrap.go +++ b/pkg/util/grpcclient/unwrap.go @@ -25,7 +25,7 @@ type unwrapErrorClientStream struct { grpc.ClientStream } -func (s *unwrapErrorClientStream) RecvMsg(m interface{}) error { +func (s *unwrapErrorClientStream) RecvMsg(m any) error { err := s.ClientStream.RecvMsg(m) if err != nil { // Try to unwrap the error to get the original error diff --git a/pkg/util/grpcclient/unwrap_test.go b/pkg/util/grpcclient/unwrap_test.go index b518bde12a..ef6e31ce42 100644 --- a/pkg/util/grpcclient/unwrap_test.go +++ b/pkg/util/grpcclient/unwrap_test.go @@ -17,7 +17,7 @@ type mockClientStream struct { recvErr error } -func (m *mockClientStream) RecvMsg(msg interface{}) error { +func (m *mockClientStream) RecvMsg(msg any) error { return m.recvErr } @@ -37,7 +37,7 @@ func (m *mockClientStream) Context() context.Context { return context.Background() } -func (m *mockClientStream) SendMsg(interface{}) error { +func (m *mockClientStream) SendMsg(any) error { return nil } @@ -78,7 +78,7 @@ func TestUnwrapErrorStreamClientInterceptor(t *testing.T) { ctx := context.Background() stream, err := chainedStreamer(ctx, &grpc.StreamDesc{}, nil, "test") require.NoError(t, err) - var msg interface{} + var msg any err = stream.RecvMsg(&msg) require.Error(t, err) require.EqualError(t, err, originalErr.Error()) diff --git a/pkg/util/grpcencoding/encoding_test.go b/pkg/util/grpcencoding/encoding_test.go index 4d80ffb28e..4c0d2389fc 100644 --- a/pkg/util/grpcencoding/encoding_test.go +++ b/pkg/util/grpcencoding/encoding_test.go @@ -104,8 +104,7 @@ func BenchmarkCompress(b *testing.B) { for _, tc := range testCases { b.Run(tc.name, func(b *testing.B) { c := encoding.GetCompressor(tc.name) - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { w, _ := c.Compress(io.Discard) _, _ = w.Write(data) _ = w.Close() @@ -139,8 +138,7 @@ func BenchmarkDecompress(b *testing.B) { w, _ := c.Compress(&buf) _, _ = w.Write(data) w.Close() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { _, _, err := decompress(c, buf.Bytes(), 10000) require.NoError(b, err) } diff --git a/pkg/util/grpcencoding/snappy/snappy.go b/pkg/util/grpcencoding/snappy/snappy.go index fe01b4ca35..022b068301 100644 --- a/pkg/util/grpcencoding/snappy/snappy.go +++ b/pkg/util/grpcencoding/snappy/snappy.go @@ -23,12 +23,12 @@ 
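The benchmark rewrites above move from the classic b.ResetTimer plus b.N counting loop to testing.B.Loop, added in Go 1.24: Loop keeps timer management inside the testing package and prevents the compiler from optimizing the loop body away. A minimal sketch of the migrated shape, with a stand-in workload rather than the real compressor:

package grpcencoding_test

import (
	"bytes"
	"testing"
)

func BenchmarkSketch(b *testing.B) {
	data := bytes.Repeat([]byte("payload"), 128)
	var buf bytes.Buffer

	// `for b.Loop()` replaces `b.ResetTimer(); for i := 0; i < b.N; i++`;
	// the first call to Loop resets the timer automatically.
	for b.Loop() {
		buf.Reset()
		buf.Write(data)
	}
}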
type compressor struct { func newCompressor() *compressor { c := &compressor{} c.readersPool = sync.Pool{ - New: func() interface{} { + New: func() any { return snappy.NewReader(nil) }, } c.writersPool = sync.Pool{ - New: func() interface{} { + New: func() any { return snappy.NewBufferedWriter(nil) }, } diff --git a/pkg/util/grpcencoding/snappyblock/snappyblock.go b/pkg/util/grpcencoding/snappyblock/snappyblock.go index a40e8429dd..ce4db92912 100644 --- a/pkg/util/grpcencoding/snappyblock/snappyblock.go +++ b/pkg/util/grpcencoding/snappyblock/snappyblock.go @@ -24,7 +24,7 @@ type compressor struct { func newCompressor() *compressor { c := &compressor{} c.readersPool = sync.Pool{ - New: func() interface{} { + New: func() any { return &reader{ pool: &c.readersPool, cbuff: bytes.NewBuffer(make([]byte, 0, 512)), @@ -32,7 +32,7 @@ func newCompressor() *compressor { }, } c.writersPool = sync.Pool{ - New: func() interface{} { + New: func() any { return &writeCloser{ pool: &c.writersPool, buff: bytes.NewBuffer(make([]byte, 0, 512)), diff --git a/pkg/util/grpcutil/grpc_interceptors_test.go b/pkg/util/grpcutil/grpc_interceptors_test.go index 6a0011c9a9..81788d22d7 100644 --- a/pkg/util/grpcutil/grpc_interceptors_test.go +++ b/pkg/util/grpcutil/grpc_interceptors_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/grpc/metadata" - util_log "github.com/cortexproject/cortex/pkg/util/log" + "github.com/cortexproject/cortex/pkg/util/requestmeta" ) func TestHTTPHeaderPropagationClientInterceptor(t *testing.T) { @@ -18,14 +18,14 @@ func TestHTTPHeaderPropagationClientInterceptor(t *testing.T) { contentsMap["TestHeader1"] = "RequestID" contentsMap["TestHeader2"] = "ContentsOfTestHeader2" contentsMap["Test3"] = "SomeInformation" - ctx = util_log.ContextWithHeaderMap(ctx, contentsMap) + ctx = requestmeta.ContextWithRequestMetadataMap(ctx, contentsMap) - ctx = injectForwardedHeadersIntoMetadata(ctx) + ctx = injectForwardedRequestMetadata(ctx) md, ok := metadata.FromOutgoingContext(ctx) require.True(t, ok) - headers := md[util_log.HeaderPropagationStringForRequestLogging] + headers := md[requestmeta.PropagationStringForRequestMetadata] assert.Equal(t, 6, len(headers)) assert.Contains(t, headers, "TestHeader1") assert.Contains(t, headers, "TestHeader2") @@ -37,20 +37,20 @@ func TestHTTPHeaderPropagationClientInterceptor(t *testing.T) { func TestExistingValuesInMetadataForHTTPPropagationClientInterceptor(t *testing.T) { ctx := context.Background() - ctx = metadata.AppendToOutgoingContext(ctx, util_log.HeaderPropagationStringForRequestLogging, "testabc123") + ctx = metadata.AppendToOutgoingContext(ctx, requestmeta.PropagationStringForRequestMetadata, "testabc123") contentsMap := make(map[string]string) contentsMap["TestHeader1"] = "RequestID" contentsMap["TestHeader2"] = "ContentsOfTestHeader2" contentsMap["Test3"] = "SomeInformation" - ctx = util_log.ContextWithHeaderMap(ctx, contentsMap) + ctx = requestmeta.ContextWithRequestMetadataMap(ctx, contentsMap) - ctx = injectForwardedHeadersIntoMetadata(ctx) + ctx = injectForwardedRequestMetadata(ctx) md, ok := metadata.FromOutgoingContext(ctx) require.True(t, ok) - contents := md[util_log.HeaderPropagationStringForRequestLogging] + contents := md[requestmeta.PropagationStringForRequestMetadata] assert.Contains(t, contents, "testabc123") assert.Equal(t, 1, len(contents)) } @@ -63,14 +63,14 @@ func TestGRPCHeaderInjectionForHTTPPropagationServerInterceptor(t *testing.T) { testMap["TestHeader2"] = "Results2" ctx = 
metadata.NewOutgoingContext(ctx, nil) - ctx = util_log.ContextWithHeaderMap(ctx, testMap) - ctx = injectForwardedHeadersIntoMetadata(ctx) + ctx = requestmeta.ContextWithRequestMetadataMap(ctx, testMap) + ctx = injectForwardedRequestMetadata(ctx) md, ok := metadata.FromOutgoingContext(ctx) require.True(t, ok) - ctx = util_log.ContextWithHeaderMapFromMetadata(ctx, md) + ctx = requestmeta.ContextWithRequestMetadataMapFromMetadata(ctx, md) - headersMap := util_log.HeaderMapFromContext(ctx) + headersMap := requestmeta.MapFromContext(ctx) require.NotNil(t, headersMap) assert.Equal(t, 2, len(headersMap)) @@ -82,11 +82,11 @@ func TestGRPCHeaderInjectionForHTTPPropagationServerInterceptor(t *testing.T) { func TestGRPCHeaderDifferentLengthsForHTTPPropagationServerInterceptor(t *testing.T) { ctx := context.Background() - ctx = metadata.AppendToOutgoingContext(ctx, util_log.HeaderPropagationStringForRequestLogging, "Test123") - ctx = metadata.AppendToOutgoingContext(ctx, util_log.HeaderPropagationStringForRequestLogging, "Results") - ctx = metadata.AppendToOutgoingContext(ctx, util_log.HeaderPropagationStringForRequestLogging, "Results2") + ctx = metadata.AppendToOutgoingContext(ctx, requestmeta.PropagationStringForRequestMetadata, "Test123") + ctx = metadata.AppendToOutgoingContext(ctx, requestmeta.PropagationStringForRequestMetadata, "Results") + ctx = metadata.AppendToOutgoingContext(ctx, requestmeta.PropagationStringForRequestMetadata, "Results2") - ctx = extractForwardedHeadersFromMetadata(ctx) + ctx = extractForwardedRequestMetadataFromMetadata(ctx) - assert.Nil(t, util_log.HeaderMapFromContext(ctx)) + assert.Nil(t, requestmeta.MapFromContext(ctx)) } diff --git a/pkg/util/grpcutil/health_check.go b/pkg/util/grpcutil/health_check.go index e6883447fb..b37ee5dd85 100644 --- a/pkg/util/grpcutil/health_check.go +++ b/pkg/util/grpcutil/health_check.go @@ -32,19 +32,6 @@ func (h *HealthCheck) Check(_ context.Context, _ *grpc_health_v1.HealthCheckRequ return &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVING}, nil } -func (h *HealthCheck) List(ctx context.Context, request *grpc_health_v1.HealthListRequest) (*grpc_health_v1.HealthListResponse, error) { - checkResp, err := h.Check(ctx, nil) - if err != nil { - return &grpc_health_v1.HealthListResponse{}, err - } - - return &grpc_health_v1.HealthListResponse{ - Statuses: map[string]*grpc_health_v1.HealthCheckResponse{ - "server": checkResp, - }, - }, nil -} - // Watch implements the grpc healthcheck. func (h *HealthCheck) Watch(_ *grpc_health_v1.HealthCheckRequest, _ grpc_health_v1.Health_WatchServer) error { return status.Error(codes.Unimplemented, "Watching is not supported") diff --git a/pkg/util/grpcutil/naming.go b/pkg/util/grpcutil/naming.go index 8029324406..701f702bc8 100644 --- a/pkg/util/grpcutil/naming.go +++ b/pkg/util/grpcutil/naming.go @@ -21,7 +21,7 @@ type Update struct { Addr string // Metadata is the updated metadata. It is nil if there is no metadata update. // Metadata is not required for a custom naming implementation. - Metadata interface{} + Metadata any } // Watcher watches for SRV updates on the specified target. 
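The tests above pin down the round trip behind the renamed propagation helpers: a key/value map is flattened into a single gRPC metadata entry on the way out and rebuilt on the way in, with odd-length payloads rejected. A minimal self-contained sketch of that fold/unfold shape (the key name and function names here are illustrative, not the package's API):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

const propagationKey = "x-request-metadata" // illustrative key name

func inject(ctx context.Context, kv map[string]string) context.Context {
	flat := make([]string, 0, 2*len(kv))
	for k, v := range kv {
		flat = append(flat, k, v)
	}
	md, ok := metadata.FromOutgoingContext(ctx)
	if !ok {
		md = metadata.New(nil)
	}
	md = md.Copy()
	md[propagationKey] = flat
	return metadata.NewOutgoingContext(ctx, md)
}

func extract(md metadata.MD) map[string]string {
	flat := md[propagationKey]
	if len(flat)%2 == 1 {
		return nil // odd-length payloads are dropped, as the tests expect
	}
	kv := make(map[string]string, len(flat)/2)
	for i := 0; i < len(flat); i += 2 {
		kv[flat[i]] = flat[i+1]
	}
	return kv
}

func main() {
	ctx := inject(context.Background(), map[string]string{"TestHeader1": "RequestID"})
	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(extract(md))
}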
diff --git a/pkg/util/grpcutil/util.go b/pkg/util/grpcutil/util.go index 8da1c6916e..b9e4da4afd 100644 --- a/pkg/util/grpcutil/util.go +++ b/pkg/util/grpcutil/util.go @@ -8,7 +8,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" - util_log "github.com/cortexproject/cortex/pkg/util/log" + "github.com/cortexproject/cortex/pkg/util/requestmeta" ) type wrappedServerStream struct { @@ -33,50 +33,51 @@ func IsGRPCContextCanceled(err error) bool { // HTTPHeaderPropagationServerInterceptor allows for propagation of HTTP Request headers across gRPC calls - works // alongside HTTPHeaderPropagationClientInterceptor -func HTTPHeaderPropagationServerInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { - ctx = extractForwardedHeadersFromMetadata(ctx) +func HTTPHeaderPropagationServerInterceptor(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { + ctx = extractForwardedRequestMetadataFromMetadata(ctx) h, err := handler(ctx, req) return h, err } // HTTPHeaderPropagationStreamServerInterceptor does the same as HTTPHeaderPropagationServerInterceptor but for streams -func HTTPHeaderPropagationStreamServerInterceptor(srv interface{}, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { +func HTTPHeaderPropagationStreamServerInterceptor(srv any, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + ctx := extractForwardedRequestMetadataFromMetadata(ss.Context()) return handler(srv, wrappedServerStream{ - ctx: extractForwardedHeadersFromMetadata(ss.Context()), + ctx: ctx, ServerStream: ss, }) } -// extractForwardedHeadersFromMetadata implements HTTPHeaderPropagationServerInterceptor by placing forwarded +// extractForwardedRequestMetadataFromMetadata implements HTTPHeaderPropagationServerInterceptor by placing forwarded // headers into incoming context -func extractForwardedHeadersFromMetadata(ctx context.Context) context.Context { +func extractForwardedRequestMetadataFromMetadata(ctx context.Context) context.Context { md, ok := metadata.FromIncomingContext(ctx) if !ok { return ctx } - return util_log.ContextWithHeaderMapFromMetadata(ctx, md) + return requestmeta.ContextWithRequestMetadataMapFromMetadata(ctx, md) } // HTTPHeaderPropagationClientInterceptor allows for propagation of HTTP Request headers across gRPC calls - works // alongside HTTPHeaderPropagationServerInterceptor -func HTTPHeaderPropagationClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, +func HTTPHeaderPropagationClientInterceptor(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - ctx = injectForwardedHeadersIntoMetadata(ctx) + ctx = injectForwardedRequestMetadata(ctx) return invoker(ctx, method, req, reply, cc, opts...) } // HTTPHeaderPropagationStreamClientInterceptor does the same as HTTPHeaderPropagationClientInterceptor but for streams func HTTPHeaderPropagationStreamClientInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { - ctx = injectForwardedHeadersIntoMetadata(ctx) + ctx = injectForwardedRequestMetadata(ctx) return streamer(ctx, desc, cc, method, opts...) 
} -// injectForwardedHeadersIntoMetadata implements HTTPHeaderPropagationClientInterceptor and HTTPHeaderPropagationStreamClientInterceptor +// injectForwardedRequestMetadata implements HTTPHeaderPropagationClientInterceptor and HTTPHeaderPropagationStreamClientInterceptor // by inserting headers that are supposed to be forwarded into metadata of the request -func injectForwardedHeadersIntoMetadata(ctx context.Context) context.Context { - headerMap := util_log.HeaderMapFromContext(ctx) - if headerMap == nil { +func injectForwardedRequestMetadata(ctx context.Context) context.Context { + requestMetadataMap := requestmeta.MapFromContext(ctx) + if requestMetadataMap == nil { return ctx } md, ok := metadata.FromOutgoingContext(ctx) @@ -85,13 +86,13 @@ func injectForwardedHeadersIntoMetadata(ctx context.Context) context.Context { } newCtx := ctx - if _, ok := md[util_log.HeaderPropagationStringForRequestLogging]; !ok { + if _, ok := md[requestmeta.PropagationStringForRequestMetadata]; !ok { var mdContent []string - for header, content := range headerMap { - mdContent = append(mdContent, header, content) + for requestMetadata, content := range requestMetadataMap { + mdContent = append(mdContent, requestMetadata, content) } md = md.Copy() - md[util_log.HeaderPropagationStringForRequestLogging] = mdContent + md[requestmeta.PropagationStringForRequestMetadata] = mdContent newCtx = metadata.NewOutgoingContext(ctx, md) } return newCtx diff --git a/pkg/util/histogram/testutils.go b/pkg/util/histogram/testutils.go index d0c46a6499..82fdf2b0f6 100644 --- a/pkg/util/histogram/testutils.go +++ b/pkg/util/histogram/testutils.go @@ -21,7 +21,7 @@ import ( // Adapted from Prometheus model/histogram/test_utils.go GenerateBigTestHistograms. func GenerateTestHistograms(from, step, numHistograms int) []*histogram.Histogram { var histograms []*histogram.Histogram - for i := 0; i < numHistograms; i++ { + for i := range numHistograms { v := int64(from + i*step) histograms = append(histograms, tsdbutil.GenerateTestGaugeHistogram(v)) } diff --git a/pkg/util/http.go b/pkg/util/http.go index 09b6aea9fe..da7c40cc4d 100644 --- a/pkg/util/http.go +++ b/pkg/util/http.go @@ -46,7 +46,7 @@ func (b BasicAuth) IsEnabled() bool { } // WriteJSONResponse writes some JSON as a HTTP response. -func WriteJSONResponse(w http.ResponseWriter, v interface{}) { +func WriteJSONResponse(w http.ResponseWriter, v any) { w.Header().Set("Content-Type", "application/json") data, err := json.Marshal(v) @@ -62,7 +62,7 @@ func WriteJSONResponse(w http.ResponseWriter, v interface{}) { } // WriteYAMLResponse writes some YAML as a HTTP response. 
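WriteJSONResponse above (and the YAML writers that follow) are plain helpers whose signatures changed spelling only; since any aliases interface{}, every call site compiles unchanged. A trimmed usage sketch of the same helper shape, under hypothetical names:

package main

import (
	"encoding/json"
	"net/http"
)

// writeJSON mirrors the WriteJSONResponse shape above, trimmed down.
func writeJSON(w http.ResponseWriter, v any) {
	w.Header().Set("Content-Type", "application/json")
	data, err := json.Marshal(v)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	_, _ = w.Write(data)
}

func main() {
	http.HandleFunc("/ready", func(w http.ResponseWriter, _ *http.Request) {
		writeJSON(w, map[string]any{"ready": true})
	})
	_ = http.ListenAndServe(":8080", nil)
}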
-func WriteYAMLResponse(w http.ResponseWriter, v interface{}) { +func WriteYAMLResponse(w http.ResponseWriter, v any) { // There is not standardised content-type for YAML, text/plain ensures the // YAML is displayed in the browser instead of offered as a download w.Header().Set("Content-Type", "text/plain; charset=utf-8") @@ -97,7 +97,7 @@ func WriteHTMLResponse(w http.ResponseWriter, message string) { // RenderHTTPResponse either responds with json or a rendered html page using the passed in template // by checking the Accepts header -func RenderHTTPResponse(w http.ResponseWriter, v interface{}, t *template.Template, r *http.Request) { +func RenderHTTPResponse(w http.ResponseWriter, v any, t *template.Template, r *http.Request) { accept := r.Header.Get("Accept") if strings.Contains(accept, "application/json") { WriteJSONResponse(w, v) @@ -111,7 +111,7 @@ func RenderHTTPResponse(w http.ResponseWriter, v interface{}, t *template.Templa } // StreamWriteYAMLResponseCommon stream writes data as http response -func streamWriteYAMLResponseCommon(w http.ResponseWriter, iter chan interface{}, logger log.Logger, marshalFn func(in interface{}) (out []byte, err error)) { +func streamWriteYAMLResponseCommon(w http.ResponseWriter, iter chan any, logger log.Logger, marshalFn func(in any) (out []byte, err error)) { w.Header().Set("Content-Type", "application/yaml") for v := range iter { data, err := marshalFn(v) @@ -128,12 +128,12 @@ func streamWriteYAMLResponseCommon(w http.ResponseWriter, iter chan interface{}, } // StreamWriteYAMLResponse stream writes data as http response using yaml v2 library -func StreamWriteYAMLResponse(w http.ResponseWriter, iter chan interface{}, logger log.Logger) { +func StreamWriteYAMLResponse(w http.ResponseWriter, iter chan any, logger log.Logger) { streamWriteYAMLResponseCommon(w, iter, logger, yaml.Marshal) } // StreamWriteYAMLV3Response stream writes data as http response using yaml v3 library -func StreamWriteYAMLV3Response(w http.ResponseWriter, iter chan interface{}, logger log.Logger) { +func StreamWriteYAMLV3Response(w http.ResponseWriter, iter chan any, logger log.Logger) { streamWriteYAMLResponseCommon(w, iter, logger, yamlv3.Marshal) } diff --git a/pkg/util/http_test.go b/pkg/util/http_test.go index d20f886161..a5226ba475 100644 --- a/pkg/util/http_test.go +++ b/pkg/util/http_test.go @@ -123,7 +123,7 @@ func TestStreamWriteYAMLResponse(t *testing.T) { w := httptest.NewRecorder() done := make(chan struct{}) - iter := make(chan interface{}) + iter := make(chan any) go func() { util.StreamWriteYAMLResponse(w, iter, util_log.Logger) close(done) diff --git a/pkg/util/httpgrpcutil/errors.go b/pkg/util/httpgrpcutil/errors.go index b2b17eed86..c841e0047b 100644 --- a/pkg/util/httpgrpcutil/errors.go +++ b/pkg/util/httpgrpcutil/errors.go @@ -7,7 +7,7 @@ import ( "github.com/weaveworks/common/httpgrpc" ) -func WrapHTTPGrpcError(err error, format string, args ...interface{}) error { +func WrapHTTPGrpcError(err error, format string, args ...any) error { if err == nil { return nil } @@ -19,6 +19,6 @@ func WrapHTTPGrpcError(err error, format string, args ...interface{}) error { return httpgrpc.ErrorFromHTTPResponse(&httpgrpc.HTTPResponse{ Code: resp.Code, Headers: resp.Headers, - Body: []byte(fmt.Sprintf("%s, %s", msg, err)), + Body: fmt.Appendf(nil, "%s, %s", msg, err), }) } diff --git a/pkg/util/labels.go b/pkg/util/labels.go index c1bc12653f..2e78a0aa90 100644 --- a/pkg/util/labels.go +++ b/pkg/util/labels.go @@ -10,10 +10,10 @@ import ( // LabelsToMetric converts a Labels 
to Metric // Don't do this on any performance sensitive paths. func LabelsToMetric(ls labels.Labels) model.Metric { - m := make(model.Metric, len(ls)) - for _, l := range ls { + m := make(model.Metric, ls.Len()) + ls.Range(func(l labels.Label) { m[model.LabelName(l.Name)] = model.LabelValue(l.Value) - } + }) return m } diff --git a/pkg/util/labelset/tracker.go b/pkg/util/labelset/tracker.go index 6fa703ccb2..2f624554ba 100644 --- a/pkg/util/labelset/tracker.go +++ b/pkg/util/labelset/tracker.go @@ -20,7 +20,7 @@ type LabelSetTracker struct { // NewLabelSetTracker initializes a LabelSetTracker to keep track of active labelset limits. func NewLabelSetTracker() *LabelSetTracker { shards := make([]*labelSetCounterShard, 0, numMetricShards) - for i := 0; i < numMetricShards; i++ { + for range numMetricShards { shards = append(shards, &labelSetCounterShard{ RWMutex: &sync.RWMutex{}, userLabelSets: map[string]map[uint64]labels.Labels{}, @@ -53,7 +53,7 @@ func (m *LabelSetTracker) Track(userId string, hash uint64, labelSet labels.Labe // It takes a function for user to customize the metrics cleanup logic when either a user or // a specific label set is removed. If a user is removed then removeUser is set to true. func (m *LabelSetTracker) UpdateMetrics(userSet map[string]map[uint64]struct{}, deleteMetricFunc func(user, labelSetStr string, removeUser bool)) { - for i := 0; i < numMetricShards; i++ { + for i := range numMetricShards { shard := m.shards[i] shard.Lock() @@ -98,7 +98,7 @@ func (m *LabelSetTracker) labelSetExists(userId string, hash uint64, labelSet la // userExists is used for testing only to check the existence of a user. func (m *LabelSetTracker) userExists(userId string) bool { - for i := 0; i < numMetricShards; i++ { + for i := range numMetricShards { shard := m.shards[i] shard.RLock() defer shard.RUnlock() diff --git a/pkg/util/limiter/query_limiter_test.go b/pkg/util/limiter/query_limiter_test.go index 699adccd32..f58fdc3339 100644 --- a/pkg/util/limiter/query_limiter_test.go +++ b/pkg/util/limiter/query_limiter_test.go @@ -96,7 +96,7 @@ func TestQueryLimiter_AddSeriesBatch_ShouldReturnErrorOnLimitExceeded(t *testing limiter := NewQueryLimiter(10, 0, 0, 0) series := make([][]cortexpb.LabelAdapter, 0, 10) - for i := 0; i < 10; i++ { + for i := range 10 { s := []cortexpb.LabelAdapter{ { Name: labels.MetricName, @@ -160,7 +160,7 @@ func AddSeriesConcurrentBench(b *testing.B, batchSize int) { worker := func(w int) { defer wg.Done() var series []labels.Labels - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { series = append(series, labels.FromMap(map[string]string{ labels.MetricName: metricName + "_1", @@ -170,10 +170,7 @@ func AddSeriesConcurrentBench(b *testing.B, batchSize int) { for i := 0; i < len(series); i += batchSize { s := make([][]cortexpb.LabelAdapter, 0, batchSize) - j := i + batchSize - if j > len(series) { - j = len(series) - } + j := min(i+batchSize, len(series)) for k := i; k < j; k++ { s = append(s, cortexpb.FromLabelsToLabelAdapters(series[k])) } diff --git a/pkg/util/limiter/rate_limiter_test.go b/pkg/util/limiter/rate_limiter_test.go index 907624c10c..7fa3f39195 100644 --- a/pkg/util/limiter/rate_limiter_test.go +++ b/pkg/util/limiter/rate_limiter_test.go @@ -70,9 +70,7 @@ func BenchmarkRateLimiter_CustomMultiTenant(b *testing.B) { limiter := NewRateLimiter(strategy, 10*time.Second) now := time.Now() - b.ResetTimer() - - for i := 0; i < b.N; i++ { + for b.Loop() { limiter.AllowN(now, "test", 1) } } @@ -81,9 +79,7 @@ func 
BenchmarkRateLimiter_OriginalSingleTenant(b *testing.B) { limiter := rate.NewLimiter(rate.Limit(1), 1) now := time.Now() - b.ResetTimer() - - for i := 0; i < b.N; i++ { + for b.Loop() { limiter.AllowN(now, 1) } } diff --git a/pkg/util/limiter/resource_based_limiter.go b/pkg/util/limiter/resource_based_limiter.go index 40415e3919..01838e1923 100644 --- a/pkg/util/limiter/resource_based_limiter.go +++ b/pkg/util/limiter/resource_based_limiter.go @@ -5,17 +5,15 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" "github.com/cortexproject/cortex/pkg/util/resource" ) const ErrResourceLimitReachedStr = "resource limit reached" -type ResourceLimitReachedError struct{} - -func (e *ResourceLimitReachedError) Error() string { - return ErrResourceLimitReachedStr -} +var ErrResourceLimitReached = status.Error(codes.ResourceExhausted, ErrResourceLimitReachedStr) type ResourceBasedLimiter struct { resourceMonitor resource.IMonitor @@ -64,9 +62,22 @@ func (l *ResourceBasedLimiter) AcceptNewRequest() error { if utilization >= limit { l.limitBreachedCount.WithLabelValues(string(resType)).Inc() - return fmt.Errorf("%s utilization limit reached (limit: %.3f, utilization: %.3f)", resType, limit, utilization) + return fmt.Errorf("%s utilization limit reached (limit: %.3f, utilization: %.3f): %s", resType, limit, utilization, ErrResourceLimitReachedStr) } } return nil } + +type MockMonitor struct { + CpuUtilization float64 + HeapUtilization float64 +} + +func (m *MockMonitor) GetCPUUtilization() float64 { + return m.CpuUtilization +} + +func (m *MockMonitor) GetHeapUtilization() float64 { + return m.HeapUtilization +} diff --git a/pkg/util/limiter/resource_based_limiter_test.go b/pkg/util/limiter/resource_based_limiter_test.go index c84d59009f..7b7b626925 100644 --- a/pkg/util/limiter/resource_based_limiter_test.go +++ b/pkg/util/limiter/resource_based_limiter_test.go @@ -5,6 +5,8 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" "github.com/cortexproject/cortex/pkg/util/resource" ) @@ -15,16 +17,17 @@ func Test_ResourceBasedLimiter(t *testing.T) { resource.Heap: 0.5, } - _, err := NewResourceBasedLimiter(&mockMonitor{}, limits, prometheus.DefaultRegisterer, "ingester") + limiter, err := NewResourceBasedLimiter(&MockMonitor{ + CpuUtilization: 0.2, + HeapUtilization: 0.2, + }, limits, prometheus.DefaultRegisterer, "ingester") require.NoError(t, err) -} - -type mockMonitor struct{} -func (m *mockMonitor) GetCPUUtilization() float64 { - return 0 + err = limiter.AcceptNewRequest() + require.NoError(t, err) } -func (m *mockMonitor) GetHeapUtilization() float64 { - return 0 +func Test_ResourceBasedLimiter_ErrResourceLimitReached(t *testing.T) { + // Expected error code from isRetryableError in blocks_store_queryable.go + require.Equal(t, codes.ResourceExhausted, status.Code(ErrResourceLimitReached)) } diff --git a/pkg/util/log/log.go b/pkg/util/log/log.go index 1db95b0b07..51df578b21 100644 --- a/pkg/util/log/log.go +++ b/pkg/util/log/log.go @@ -1,9 +1,7 @@ package log import ( - "context" "fmt" - "net/http" "os" "github.com/go-kit/log" @@ -12,15 +10,6 @@ import ( "github.com/prometheus/common/promslog" "github.com/weaveworks/common/logging" "github.com/weaveworks/common/server" - "google.golang.org/grpc/metadata" -) - -type contextKey int - -const ( - headerMapContextKey 
contextKey = 0 - - HeaderPropagationStringForRequestLogging string = "x-http-header-forwarding-logging" ) var ( @@ -83,7 +72,7 @@ func newLoggerWithFormat(format logging.Format) log.Logger { return logger } -func newPrometheusLoggerFrom(logger log.Logger, logLevel logging.Level, keyvals ...interface{}) log.Logger { +func newPrometheusLoggerFrom(logger log.Logger, logLevel logging.Level, keyvals ...any) log.Logger { // Sort the logger chain to avoid expensive log.Valuer evaluation for disallowed level. // Ref: https://github.com/go-kit/log/issues/14#issuecomment-945038252 logger = log.With(logger, "ts", log.DefaultTimestampUTC) @@ -101,7 +90,7 @@ func newPrometheusLoggerFrom(logger log.Logger, logLevel logging.Level, keyvals } // Log increments the appropriate Prometheus counter depending on the log level. -func (pl *PrometheusLogger) Log(kv ...interface{}) error { +func (pl *PrometheusLogger) Log(kv ...any) error { pl.logger.Log(kv...) l := "unknown" for i := 1; i < len(kv); i += 2 { @@ -126,36 +115,3 @@ func CheckFatal(location string, err error) { os.Exit(1) } } - -func HeaderMapFromContext(ctx context.Context) map[string]string { - headerMap, ok := ctx.Value(headerMapContextKey).(map[string]string) - if !ok { - return nil - } - return headerMap -} - -func ContextWithHeaderMap(ctx context.Context, headerMap map[string]string) context.Context { - return context.WithValue(ctx, headerMapContextKey, headerMap) -} - -// InjectHeadersIntoHTTPRequest injects the logging header map from the context into the request headers. -func InjectHeadersIntoHTTPRequest(headerMap map[string]string, request *http.Request) { - for header, contents := range headerMap { - request.Header.Add(header, contents) - } -} - -func ContextWithHeaderMapFromMetadata(ctx context.Context, md metadata.MD) context.Context { - headersSlice, ok := md[HeaderPropagationStringForRequestLogging] - if !ok || len(headersSlice)%2 == 1 { - return ctx - } - - headerMap := make(map[string]string) - for i := 0; i < len(headersSlice); i += 2 { - headerMap[headersSlice[i]] = headersSlice[i+1] - } - - return ContextWithHeaderMap(ctx, headerMap) -} diff --git a/pkg/util/log/log_test.go b/pkg/util/log/log_test.go index 0401d4ce08..bade053327 100644 --- a/pkg/util/log/log_test.go +++ b/pkg/util/log/log_test.go @@ -1,73 +1,15 @@ package log import ( - "context" "io" - "net/http" "os" "testing" "github.com/go-kit/log/level" "github.com/stretchr/testify/require" "github.com/weaveworks/common/server" - "google.golang.org/grpc/metadata" ) -func TestHeaderMapFromMetadata(t *testing.T) { - md := metadata.New(nil) - md.Append(HeaderPropagationStringForRequestLogging, "TestHeader1", "SomeInformation", "TestHeader2", "ContentsOfTestHeader2") - - ctx := context.Background() - - ctx = ContextWithHeaderMapFromMetadata(ctx, md) - - headerMap := HeaderMapFromContext(ctx) - - require.Contains(t, headerMap, "TestHeader1") - require.Contains(t, headerMap, "TestHeader2") - require.Equal(t, "SomeInformation", headerMap["TestHeader1"]) - require.Equal(t, "ContentsOfTestHeader2", headerMap["TestHeader2"]) -} - -func TestHeaderMapFromMetadataWithImproperLength(t *testing.T) { - md := metadata.New(nil) - md.Append(HeaderPropagationStringForRequestLogging, "TestHeader1", "SomeInformation", "TestHeader2", "ContentsOfTestHeader2", "Test3") - - ctx := context.Background() - - ctx = ContextWithHeaderMapFromMetadata(ctx, md) - - headerMap := HeaderMapFromContext(ctx) - require.Nil(t, headerMap) -} - -func TestInjectHeadersIntoHTTPRequest(t *testing.T) { - contentsMap := 
make(map[string]string) - contentsMap["TestHeader1"] = "RequestID" - contentsMap["TestHeader2"] = "ContentsOfTestHeader2" - - h := http.Header{} - req := &http.Request{ - Method: "GET", - RequestURI: "/HTTPHeaderTest", - Body: http.NoBody, - Header: h, - } - InjectHeadersIntoHTTPRequest(contentsMap, req) - - header1 := req.Header.Values("TestHeader1") - header2 := req.Header.Values("TestHeader2") - - require.NotNil(t, header1) - require.NotNil(t, header2) - require.Equal(t, 1, len(header1)) - require.Equal(t, 1, len(header2)) - - require.Equal(t, "RequestID", header1[0]) - require.Equal(t, "ContentsOfTestHeader2", header2[0]) - -} - func TestInitLogger(t *testing.T) { stderr := os.Stderr r, w, err := os.Pipe() @@ -85,8 +27,8 @@ func TestInitLogger(t *testing.T) { require.NoError(t, w.Close()) logs, err := io.ReadAll(r) require.NoError(t, err) - require.Contains(t, string(logs), "caller=log_test.go:82 level=debug hello=world") - require.Contains(t, string(logs), "caller=log_test.go:83 level=debug msg=\"hello world\"") + require.Contains(t, string(logs), "caller=log_test.go:24 level=debug hello=world") + require.Contains(t, string(logs), "caller=log_test.go:25 level=debug msg=\"hello world\"") } func BenchmarkDisallowedLogLevels(b *testing.B) { @@ -94,7 +36,7 @@ func BenchmarkDisallowedLogLevels(b *testing.B) { require.NoError(b, cfg.LogLevel.Set("warn")) InitLogger(cfg) - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { level.Info(Logger).Log("hello", "world", "number", i) level.Debug(Logger).Log("hello", "world", "number", i) } diff --git a/pkg/util/log/wrappers.go b/pkg/util/log/wrappers.go index 1394b7b0b7..9a706a570e 100644 --- a/pkg/util/log/wrappers.go +++ b/pkg/util/log/wrappers.go @@ -9,6 +9,7 @@ import ( "go.opentelemetry.io/otel/trace" "github.com/cortexproject/cortex/pkg/tenant" + "github.com/cortexproject/cortex/pkg/util/requestmeta" ) // WithUserID returns a Logger that has information about the current user in @@ -64,7 +65,7 @@ func WithSourceIPs(sourceIPs string, l log.Logger) log.Logger { // HeadersFromContext enables the logging of specified HTTP Headers that have been added to a context func HeadersFromContext(ctx context.Context, l log.Logger) log.Logger { - headerContentsMap := HeaderMapFromContext(ctx) + headerContentsMap := requestmeta.LoggingHeadersAndRequestIdFromContext(ctx) for header, contents := range headerContentsMap { l = log.With(l, header, contents) } diff --git a/pkg/util/logical_plan/test_logicalplan_utils.go b/pkg/util/logical_plan/test_logicalplan_utils.go new file mode 100644 index 0000000000..49bd8da286 --- /dev/null +++ b/pkg/util/logical_plan/test_logicalplan_utils.go @@ -0,0 +1,50 @@ +package logical_plan + +import ( + "time" + + "github.com/prometheus/prometheus/promql/parser" + "github.com/thanos-io/promql-engine/logicalplan" + "github.com/thanos-io/promql-engine/query" +) + +func getStartAndEnd(start time.Time, end time.Time, step time.Duration) (time.Time, time.Time) { + if step == 0 { + return start, start + } + return start, end +} + +func CreateTestLogicalPlan(qs string, start time.Time, end time.Time, step time.Duration) (*logicalplan.Plan, error) { + + start, end = getStartAndEnd(start, end, step) + + qOpts := query.Options{ + Start: start, + End: end, + Step: step, + StepsBatch: 10, + NoStepSubqueryIntervalFn: func(duration time.Duration) time.Duration { + return 0 + }, + LookbackDelta: 0, + EnablePerStepStats: false, + } + + expr, err := parser.NewParser(qs, parser.WithFunctions(parser.Functions)).ParseExpr() + if err != nil { + 
return nil, err + } + + planOpts := logicalplan.PlanOptions{ + DisableDuplicateLabelCheck: false, + } + + logicalPlan, err := logicalplan.NewFromAST(expr, &qOpts, planOpts) + if err != nil { + return nil, err + } + optimizedPlan, _ := logicalPlan.Optimize(logicalplan.DefaultOptimizers) + + return &optimizedPlan, nil +} diff --git a/pkg/util/metrics_helper.go b/pkg/util/metrics_helper.go index 0a823920fd..6678c47875 100644 --- a/pkg/util/metrics_helper.go +++ b/pkg/util/metrics_helper.go @@ -20,7 +20,7 @@ import ( var ( bytesBufferPool = sync.Pool{ - New: func() interface{} { + New: func() any { return bytes.NewBuffer(nil) }, } @@ -723,7 +723,7 @@ func (r *UserRegistries) BuildMetricFamiliesPerUser() MetricFamiliesPerUser { // FromLabelPairsToLabels converts dto.LabelPair into labels.Labels. func FromLabelPairsToLabels(pairs []*dto.LabelPair) labels.Labels { - builder := labels.NewBuilder(nil) + builder := labels.NewBuilder(labels.EmptyLabels()) for _, pair := range pairs { builder.Set(pair.GetName(), pair.GetValue()) } @@ -770,7 +770,7 @@ func GetLabels(c prometheus.Collector, filter map[string]string) ([]labels.Label errs := tsdb_errors.NewMulti() var result []labels.Labels dtoMetric := &dto.Metric{} - lbls := labels.NewBuilder(nil) + lbls := labels.NewBuilder(labels.EmptyLabels()) nextMetric: for m := range ch { @@ -781,7 +781,7 @@ nextMetric: continue } - lbls.Reset(nil) + lbls.Reset(labels.EmptyLabels()) for _, lp := range dtoMetric.Label { n := lp.GetName() v := lp.GetValue() diff --git a/pkg/util/metrics_helper_test.go b/pkg/util/metrics_helper_test.go index 712f681b6e..85d9895389 100644 --- a/pkg/util/metrics_helper_test.go +++ b/pkg/util/metrics_helper_test.go @@ -103,7 +103,7 @@ func BenchmarkGetMetricsWithLabelNames(b *testing.B) { // Generate metrics and add them to a metric family. mf := &dto.MetricFamily{Metric: make([]*dto.Metric, 0, numMetrics)} - for i := 0; i < numMetrics; i++ { + for i := range numMetrics { labels := []*dto.LabelPair{{ Name: proto.String("unique"), Value: proto.String(strconv.Itoa(i)), @@ -122,10 +122,9 @@ func BenchmarkGetMetricsWithLabelNames(b *testing.B) { }) } - b.ResetTimer() b.ReportAllocs() - for n := 0; n < b.N; n++ { + for b.Loop() { out := getMetricsWithLabelNames(mf, []string{"label_1", "label_2", "label_3"}) if expected := 1; len(out) != expected { @@ -471,22 +470,22 @@ func TestFloat64PrecisionStability(t *testing.T) { labelNames := []string{"label_one", "label_two"} g := promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{Name: "test_gauge"}, labelNames) - for i := 0; i < cardinality; i++ { + for i := range cardinality { g.WithLabelValues("a", strconv.Itoa(i)).Set(rand.Float64()) } c := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{Name: "test_counter"}, labelNames) - for i := 0; i < cardinality; i++ { + for i := range cardinality { c.WithLabelValues("a", strconv.Itoa(i)).Add(rand.Float64()) } h := promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{Name: "test_histogram", Buckets: []float64{0.1, 0.5, 1}}, labelNames) - for i := 0; i < cardinality; i++ { + for i := range cardinality { h.WithLabelValues("a", strconv.Itoa(i)).Observe(rand.Float64()) } s := promauto.With(reg).NewSummaryVec(prometheus.SummaryOpts{Name: "test_summary"}, labelNames) - for i := 0; i < cardinality; i++ { + for i := range cardinality { s.WithLabelValues("a", strconv.Itoa(i)).Observe(rand.Float64()) } @@ -496,7 +495,7 @@ func TestFloat64PrecisionStability(t *testing.T) { // Ensure multiple runs always return the same exact results. 
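The labels.NewBuilder(nil) → labels.NewBuilder(labels.EmptyLabels()) changes above follow from labels.Labels being an opaque type under Prometheus' alternative label representations (e.g. the stringlabels/slicelabels builds), where a nil literal is no longer a valid base value; labels.EmptyLabels() is the typed empty value. A minimal sketch of the builder-and-Range pattern these call sites now use:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func fromPairs(pairs map[string]string) labels.Labels {
	// Start from the typed empty value rather than nil.
	b := labels.NewBuilder(labels.EmptyLabels())
	for name, value := range pairs {
		b.Set(name, value)
	}
	return b.Labels()
}

func main() {
	ls := fromPairs(map[string]string{"__name__": "up", "job": "cortex"})
	// Iteration goes through Range, matching LabelsToMetric above.
	ls.Range(func(l labels.Label) {
		fmt.Println(l.Name, "=", l.Value)
	})
}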
expected := map[string][]*dto.Metric{} - for run := 0; run < numRuns; run++ { + for run := range numRuns { mf := registries.BuildMetricFamiliesPerUser() gauge := collectMetrics(t, func(out chan prometheus.Metric) { @@ -1002,22 +1001,22 @@ func setupTestMetrics() *testMetrics { labelNames := []string{"label_one", "label_two"} g := promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{Name: "test_gauge"}, labelNames) - for i := 0; i < cardinality; i++ { + for i := range cardinality { g.WithLabelValues("a", strconv.Itoa(i)).Set(float64(userID)) } c := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{Name: "test_counter"}, labelNames) - for i := 0; i < cardinality; i++ { + for i := range cardinality { c.WithLabelValues("a", strconv.Itoa(i)).Add(float64(userID)) } h := promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{Name: "test_histogram", Buckets: []float64{1, 3, 5}}, labelNames) - for i := 0; i < cardinality; i++ { + for i := range cardinality { h.WithLabelValues("a", strconv.Itoa(i)).Observe(float64(userID)) } s := promauto.With(reg).NewSummaryVec(prometheus.SummaryOpts{Name: "test_summary"}, labelNames) - for i := 0; i < cardinality; i++ { + for i := range cardinality { s.WithLabelValues("a", strconv.Itoa(i)).Observe(float64(userID)) } @@ -1135,8 +1134,7 @@ func BenchmarkGetLabels_SmallSet(b *testing.B) { m.WithLabelValues("worst", "user3").Inc() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { if _, err := GetLabels(m, map[string]string{"user": "user1", "reason": "worse"}); err != nil { b.Fatal(err) } @@ -1163,9 +1161,8 @@ func BenchmarkGetLabels_MediumSet(b *testing.B) { m.WithLabelValues("worst", fmt.Sprintf("user%d", i)).Inc() } } - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { if _, err := GetLabels(m, map[string]string{"user": "user1", "reason": "worse"}); err != nil { b.Fatal(err) } diff --git a/pkg/util/middleware/grpc.go b/pkg/util/middleware/grpc.go index aee899095b..3adea5eb9a 100644 --- a/pkg/util/middleware/grpc.go +++ b/pkg/util/middleware/grpc.go @@ -15,7 +15,7 @@ import ( // PrometheusGRPCUnaryInstrumentation records duration of gRPC requests client side. func PrometheusGRPCUnaryInstrumentation(metric *prometheus.HistogramVec) grpc.UnaryClientInterceptor { - return func(ctx context.Context, method string, req, resp interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + return func(ctx context.Context, method string, req, resp any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { start := time.Now() err := invoker(ctx, method, req, resp, cc, opts...) 
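The counter-style loops rewritten above lean on two recent language additions: integer range loops (for i := range n, Go 1.22) and the min built-in (Go 1.21), which earlier in this change replaces the hand-written clamp in the query limiter's batching. A compact sketch of both, with illustrative names:

package main

import "fmt"

// batchEnd clamps a batch's end index to the slice length, the shape
// that `j := min(i+batchSize, len(series))` takes above.
func batchEnd(i, batchSize, total int) int {
	return min(i+batchSize, total)
}

func main() {
	const n = 5
	for i := range n { // iterates 0..n-1 without a manual counter
		fmt.Println(i, batchEnd(i, 2, n))
	}
}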
metric.WithLabelValues(method, errorCode(err)).Observe(time.Since(start).Seconds()) @@ -46,7 +46,7 @@ type instrumentedClientStream struct { grpc.ClientStream } -func (s *instrumentedClientStream) SendMsg(m interface{}) error { +func (s *instrumentedClientStream) SendMsg(m any) error { err := s.ClientStream.SendMsg(m) if err == nil { return err @@ -61,7 +61,7 @@ func (s *instrumentedClientStream) SendMsg(m interface{}) error { return err } -func (s *instrumentedClientStream) RecvMsg(m interface{}) error { +func (s *instrumentedClientStream) RecvMsg(m any) error { err := s.ClientStream.RecvMsg(m) if err == nil { return err @@ -104,7 +104,7 @@ type instrumentedReusableClientStream struct { grpc.ClientStream } -func (s *instrumentedReusableClientStream) SendMsg(m interface{}) error { +func (s *instrumentedReusableClientStream) SendMsg(m any) error { start := time.Now() err := s.ClientStream.SendMsg(m) if err != nil && err != io.EOF { @@ -115,7 +115,7 @@ func (s *instrumentedReusableClientStream) SendMsg(m interface{}) error { return err } -func (s *instrumentedReusableClientStream) RecvMsg(m interface{}) error { +func (s *instrumentedReusableClientStream) RecvMsg(m any) error { start := time.Now() err := s.ClientStream.RecvMsg(m) if err != nil && err != io.EOF { diff --git a/pkg/util/modules/modules.go b/pkg/util/modules/modules.go index 06e7e05a1e..bab811ffcf 100644 --- a/pkg/util/modules/modules.go +++ b/pkg/util/modules/modules.go @@ -2,6 +2,7 @@ package modules import ( "fmt" + "slices" "sort" "github.com/go-kit/log" @@ -210,11 +211,8 @@ func (m *Manager) findInverseDependencies(mod string, mods []string) []string { result := []string(nil) for _, n := range mods { - for _, d := range m.modules[n].deps { - if d == mod { - result = append(result, n) - break - } + if slices.Contains(m.modules[n].deps, mod) { + result = append(result, n) } } diff --git a/pkg/util/priority_queue.go b/pkg/util/priority_queue.go index 8d11c55088..4bb8b1f068 100644 --- a/pkg/util/priority_queue.go +++ b/pkg/util/priority_queue.go @@ -30,11 +30,11 @@ func (q queue) Swap(i, j int) { q[i], q[j] = q[j], q[i] } // Push and Pop use pointer receivers because they modify the slice's length, // not just its contents. 
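findInverseDependencies above swaps its hand-rolled membership loop for slices.Contains (Go 1.21); the break-on-first-match behavior is preserved because Contains stops at the first hit. A minimal sketch of the same shape (deps is an illustrative stand-in for the module manager's internal state):

package main

import (
	"fmt"
	"slices"
)

// inverseDeps returns every module in mods that lists mod as a
// dependency, mirroring the rewritten loop above.
func inverseDeps(mod string, mods []string, deps map[string][]string) []string {
	var result []string
	for _, n := range mods {
		if slices.Contains(deps[n], mod) {
			result = append(result, n)
		}
	}
	return result
}

func main() {
	deps := map[string][]string{
		"distributor": {"ring", "server"},
		"ingester":    {"ring", "server"},
		"server":      nil,
	}
	fmt.Println(inverseDeps("ring", []string{"distributor", "ingester", "server"}, deps))
}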
-func (q *queue) Push(x interface{}) { +func (q *queue) Push(x any) { *q = append(*q, x.(PriorityOp)) } -func (q *queue) Pop() interface{} { +func (q *queue) Pop() any { old := *q n := len(old) x := old[n-1] diff --git a/pkg/util/push/otlp.go b/pkg/util/push/otlp.go index e328f1ae71..cdf1259d12 100644 --- a/pkg/util/push/otlp.go +++ b/pkg/util/push/otlp.go @@ -10,6 +10,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite" @@ -66,13 +67,13 @@ func OTLPHandler(maxRecvMsgSize int, overrides *validation.Overrides, cfg distri // otlp to prompb TimeSeries promTsList, promMetadata, err := convertToPromTS(r.Context(), req.Metrics(), cfg, overrides, userID, logger) - if err != nil { + if err != nil && len(promTsList) == 0 { http.Error(w, err.Error(), http.StatusBadRequest) return } // convert prompb to cortexpb TimeSeries - tsList := []cortexpb.PreallocTimeseries(nil) + tsList := make([]cortexpb.PreallocTimeseries, 0, len(promTsList)) for _, v := range promTsList { tsList = append(tsList, cortexpb.PreallocTimeseries{TimeSeries: &cortexpb.TimeSeries{ Labels: makeLabels(v.Labels), @@ -177,8 +178,10 @@ func decodeOTLPWriteRequest(ctx context.Context, r *http.Request, maxSize int) ( func convertToPromTS(ctx context.Context, pmetrics pmetric.Metrics, cfg distributor.OTLPConfig, overrides *validation.Overrides, userID string, logger log.Logger) ([]prompb.TimeSeries, []prompb.MetricMetadata, error) { promConverter := prometheusremotewrite.NewPrometheusConverter() settings := prometheusremotewrite.Settings{ - AddMetricSuffixes: true, - DisableTargetInfo: cfg.DisableTargetInfo, + AddMetricSuffixes: true, + DisableTargetInfo: cfg.DisableTargetInfo, + AllowDeltaTemporality: cfg.AllowDeltaTemporality, + EnableTypeAndUnitLabels: cfg.EnableTypeAndUnitLabels, } var annots annotations.Annotations @@ -187,7 +190,9 @@ func convertToPromTS(ctx context.Context, pmetrics pmetric.Metrics, cfg distribu if cfg.ConvertAllAttributes { annots, err = promConverter.FromMetrics(ctx, convertToMetricsAttributes(pmetrics), settings) } else { - settings.PromoteResourceAttributes = overrides.PromoteResourceAttributes(userID) + settings.PromoteResourceAttributes = prometheusremotewrite.NewPromoteResourceAttributes(config.OTLPConfig{ + PromoteResourceAttributes: overrides.PromoteResourceAttributes(userID), + }) annots, err = promConverter.FromMetrics(ctx, pmetrics, settings) } @@ -197,19 +202,18 @@ func convertToPromTS(ctx context.Context, pmetrics pmetric.Metrics, cfg distribu } if err != nil { - level.Error(logger).Log("msg", "Error translating OTLP metrics to Prometheus write request", "err", err) - return nil, nil, err + level.Warn(logger).Log("msg", "Error translating OTLP metrics to Prometheus write request", "err", err) } - return promConverter.TimeSeries(), promConverter.Metadata(), nil + return promConverter.TimeSeries(), promConverter.Metadata(), err } func makeLabels(in []prompb.Label) []cortexpb.LabelAdapter { - out := make(labels.Labels, 0, len(in)) + builder := labels.NewBuilder(labels.EmptyLabels()) for _, l := range in { - out = append(out, labels.Label{Name: l.Name, Value: l.Value}) + builder.Set(l.Name, l.Value) } - return cortexpb.FromLabelsToLabelAdapters(out) + return cortexpb.FromLabelsToLabelAdapters(builder.Labels()) } func makeSamples(in []prompb.Sample) []cortexpb.Sample { diff 
--git a/pkg/util/push/otlp_test.go b/pkg/util/push/otlp_test.go index de3b780e09..efcdb40655 100644 --- a/pkg/util/push/otlp_test.go +++ b/pkg/util/push/otlp_test.go @@ -4,13 +4,16 @@ import ( "bytes" "compress/gzip" "context" + "fmt" "io" "net/http" "net/http/httptest" + "sort" "testing" "time" "github.com/go-kit/log" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -25,6 +28,325 @@ import ( "github.com/cortexproject/cortex/pkg/util/validation" ) +func TestOTLP_EnableTypeAndUnitLabels(t *testing.T) { + logger := log.NewNopLogger() + ctx := context.Background() + ts := time.Now() + + tests := []struct { + description string + enableTypeAndUnitLabels bool + allowDeltaTemporality bool + otlpSeries pmetric.Metric + expectedLabels labels.Labels + expectedMetadata prompb.MetricMetadata + }{ + { + description: "[enableTypeAndUnitLabels: true], the '__type__' label should be attached when the type is the gauge", + enableTypeAndUnitLabels: true, + otlpSeries: createOtelSum("test", "seconds", pmetric.AggregationTemporalityCumulative, ts), + expectedLabels: labels.FromMap(map[string]string{ + "__name__": "test_seconds", + "__type__": "gauge", + "__unit__": "seconds", + "test_label": "test_value", + }), + expectedMetadata: createPromMetadata("test_seconds", "seconds", prompb.MetricMetadata_GAUGE), + }, + { + description: "[enableTypeAndUnitLabels: true], the '__type__' label should not be attached when the type is unknown", + enableTypeAndUnitLabels: true, + allowDeltaTemporality: true, + otlpSeries: createOtelSum("test", "seconds", pmetric.AggregationTemporalityDelta, ts), + expectedLabels: labels.FromMap(map[string]string{ + "__name__": "test_seconds", + "__unit__": "seconds", + "test_label": "test_value", + }), + expectedMetadata: createPromMetadata("test_seconds", "seconds", prompb.MetricMetadata_UNKNOWN), + }, + { + description: "[enableTypeAndUnitLabels: false]", + enableTypeAndUnitLabels: false, + otlpSeries: createOtelSum("test", "seconds", pmetric.AggregationTemporalityCumulative, ts), + expectedLabels: labels.FromMap(map[string]string{ + "__name__": "test_seconds", + "test_label": "test_value", + }), + expectedMetadata: createPromMetadata("test_seconds", "seconds", prompb.MetricMetadata_GAUGE), + }, + } + + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + cfg := distributor.OTLPConfig{ + EnableTypeAndUnitLabels: test.enableTypeAndUnitLabels, + AllowDeltaTemporality: test.allowDeltaTemporality, + } + metrics := pmetric.NewMetrics() + rm := metrics.ResourceMetrics().AppendEmpty() + sm := rm.ScopeMetrics().AppendEmpty() + + test.otlpSeries.CopyTo(sm.Metrics().AppendEmpty()) + + limits := validation.Limits{} + overrides := validation.NewOverrides(limits, nil) + promSeries, metadata, err := convertToPromTS(ctx, metrics, cfg, overrides, "user-1", logger) + require.NoError(t, err) + require.Equal(t, 1, len(promSeries)) + require.Equal(t, prompb.FromLabels(test.expectedLabels, nil), promSeries[0].Labels) + + require.Equal(t, 1, len(metadata)) + require.Equal(t, test.expectedMetadata, metadata[0]) + }) + } +} + +func TestOTLP_AllowDeltaTemporality(t *testing.T) { + logger := log.NewNopLogger() + ctx := context.Background() + ts := time.Now() + + tests := []struct { + description string + allowDeltaTemporality bool + otlpSeries []pmetric.Metric + expectedSeries []prompb.TimeSeries + expectedMetadata []prompb.MetricMetadata + expectedErr string + }{ + { 
+ description: "[allowDeltaTemporality: false] cumulative type should be converted", + allowDeltaTemporality: false, + otlpSeries: []pmetric.Metric{ + createOtelSum("test_1", "", pmetric.AggregationTemporalityCumulative, ts), + createOtelSum("test_2", "", pmetric.AggregationTemporalityCumulative, ts), + }, + expectedSeries: []prompb.TimeSeries{ + createPromFloatSeries("test_1", ts), + createPromFloatSeries("test_2", ts), + }, + expectedMetadata: []prompb.MetricMetadata{ + createPromMetadata("test_1", "", prompb.MetricMetadata_GAUGE), + createPromMetadata("test_2", "", prompb.MetricMetadata_GAUGE), + }, + }, + { + description: "[allowDeltaTemporality: false] delta type should not be converted", + allowDeltaTemporality: false, + otlpSeries: []pmetric.Metric{ + createOtelSum("test_1", "", pmetric.AggregationTemporalityDelta, ts), + createOtelSum("test_2", "", pmetric.AggregationTemporalityDelta, ts), + }, + expectedSeries: []prompb.TimeSeries{}, + expectedMetadata: []prompb.MetricMetadata{}, + expectedErr: `invalid temporality and type combination for metric "test_1"; invalid temporality and type combination for metric "test_2"`, + }, + { + description: "[allowDeltaTemporality: true] delta type should be converted", + allowDeltaTemporality: true, + otlpSeries: []pmetric.Metric{ + createOtelSum("test_1", "", pmetric.AggregationTemporalityDelta, ts), + createOtelSum("test_2", "", pmetric.AggregationTemporalityDelta, ts), + }, + expectedSeries: []prompb.TimeSeries{ + createPromFloatSeries("test_1", ts), + createPromFloatSeries("test_2", ts), + }, + expectedMetadata: []prompb.MetricMetadata{ + createPromMetadata("test_1", "", prompb.MetricMetadata_UNKNOWN), + createPromMetadata("test_2", "", prompb.MetricMetadata_UNKNOWN), + }, + }, + { + description: "[allowDeltaTemporality: false] mixed delta and cumulative, should be converted only for cumulative type", + allowDeltaTemporality: false, + otlpSeries: []pmetric.Metric{ + createOtelSum("test_1", "", pmetric.AggregationTemporalityDelta, ts), + createOtelSum("test_2", "", pmetric.AggregationTemporalityCumulative, ts), + }, + expectedSeries: []prompb.TimeSeries{ + createPromFloatSeries("test_2", ts), + }, + expectedMetadata: []prompb.MetricMetadata{ + createPromMetadata("test_2", "", prompb.MetricMetadata_GAUGE), + }, + expectedErr: `invalid temporality and type combination for metric "test_1"`, + }, + { + description: "[allowDeltaTemporality: false, exponential histogram] cumulative histogram should be converted", + allowDeltaTemporality: false, + otlpSeries: []pmetric.Metric{ + createOtelExponentialHistogram("test_1", pmetric.AggregationTemporalityCumulative, ts), + createOtelExponentialHistogram("test_2", pmetric.AggregationTemporalityCumulative, ts), + }, + expectedSeries: []prompb.TimeSeries{ + createPromNativeHistogramSeries("test_1", prompb.Histogram_UNKNOWN, ts), + createPromNativeHistogramSeries("test_2", prompb.Histogram_UNKNOWN, ts), + }, + expectedMetadata: []prompb.MetricMetadata{ + createPromMetadata("test_1", "", prompb.MetricMetadata_HISTOGRAM), + createPromMetadata("test_2", "", prompb.MetricMetadata_HISTOGRAM), + }, + }, + { + description: "[allowDeltaTemporality: false, exponential histogram] delta histogram should not be converted", + allowDeltaTemporality: false, + otlpSeries: []pmetric.Metric{ + createOtelExponentialHistogram("test_1", pmetric.AggregationTemporalityDelta, ts), + createOtelExponentialHistogram("test_2", pmetric.AggregationTemporalityDelta, ts), + }, + expectedSeries: []prompb.TimeSeries{}, + expectedMetadata: 
[]prompb.MetricMetadata{}, + expectedErr: `invalid temporality and type combination for metric "test_1"; invalid temporality and type combination for metric "test_2"`, + }, + { + description: "[allowDeltaTemporality: true, exponential histogram] delta histogram should be converted", + allowDeltaTemporality: true, + otlpSeries: []pmetric.Metric{ + createOtelExponentialHistogram("test_1", pmetric.AggregationTemporalityDelta, ts), + createOtelExponentialHistogram("test_2", pmetric.AggregationTemporalityDelta, ts), + }, + expectedSeries: []prompb.TimeSeries{ + createPromNativeHistogramSeries("test_1", prompb.Histogram_GAUGE, ts), + createPromNativeHistogramSeries("test_2", prompb.Histogram_GAUGE, ts), + }, + expectedMetadata: []prompb.MetricMetadata{ + createPromMetadata("test_1", "", prompb.MetricMetadata_UNKNOWN), + createPromMetadata("test_2", "", prompb.MetricMetadata_UNKNOWN), + }, + }, + { + description: "[allowDeltaTemporality: false, exponential histogram] mixed delta and cumulative histogram, should be converted only for cumulative type", + allowDeltaTemporality: false, + otlpSeries: []pmetric.Metric{ + createOtelExponentialHistogram("test_1", pmetric.AggregationTemporalityDelta, ts), + createOtelExponentialHistogram("test_2", pmetric.AggregationTemporalityCumulative, ts), + }, + expectedSeries: []prompb.TimeSeries{ + createPromNativeHistogramSeries("test_2", prompb.Histogram_UNKNOWN, ts), + }, + expectedMetadata: []prompb.MetricMetadata{ + createPromMetadata("test_2", "", prompb.MetricMetadata_HISTOGRAM), + }, + expectedErr: `invalid temporality and type combination for metric "test_1"`, + }, + } + + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + cfg := distributor.OTLPConfig{AllowDeltaTemporality: test.allowDeltaTemporality} + metrics := pmetric.NewMetrics() + rm := metrics.ResourceMetrics().AppendEmpty() + sm := rm.ScopeMetrics().AppendEmpty() + + for _, s := range test.otlpSeries { + s.CopyTo(sm.Metrics().AppendEmpty()) + } + + limits := validation.Limits{} + overrides := validation.NewOverrides(limits, nil) + promSeries, metadata, err := convertToPromTS(ctx, metrics, cfg, overrides, "user-1", logger) + require.Equal(t, sortTimeSeries(test.expectedSeries), sortTimeSeries(promSeries)) + require.Equal(t, test.expectedMetadata, metadata) + if test.expectedErr != "" { + require.Equal(t, test.expectedErr, err.Error()) + } else { + require.NoError(t, err) + } + + }) + } +} + +func createPromMetadata(name, unit string, metadataType prompb.MetricMetadata_MetricType) prompb.MetricMetadata { + return prompb.MetricMetadata{ + Type: metadataType, + Unit: unit, + MetricFamilyName: name, + } +} + +// copied from: https://github.com/prometheus/prometheus/blob/v3.5.0/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +func sortTimeSeries(series []prompb.TimeSeries) []prompb.TimeSeries { + for i := range series { + sort.Slice(series[i].Labels, func(j, k int) bool { + return series[i].Labels[j].Name < series[i].Labels[k].Name + }) + } + + sort.Slice(series, func(i, j int) bool { + return fmt.Sprint(series[i].Labels) < fmt.Sprint(series[j].Labels) + }) + + return series +} + +// copied from: https://github.com/prometheus/prometheus/blob/v3.5.0/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +func createPromFloatSeries(name string, ts time.Time) prompb.TimeSeries { + return prompb.TimeSeries{ + Labels: []prompb.Label{ + {Name: "__name__", Value: name}, + {Name: "test_label", Value: "test_value"}, + }, + Samples: []prompb.Sample{{ 
+ Value: 5, + Timestamp: ts.UnixMilli(), + }}, + } +} + +// copied from: https://github.com/prometheus/prometheus/blob/v3.5.0/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +func createOtelSum(name, unit string, temporality pmetric.AggregationTemporality, ts time.Time) pmetric.Metric { + metrics := pmetric.NewMetricSlice() + m := metrics.AppendEmpty() + m.SetName(name) + m.SetUnit(unit) + sum := m.SetEmptySum() + sum.SetAggregationTemporality(temporality) + dp := sum.DataPoints().AppendEmpty() + dp.SetDoubleValue(5) + dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) + dp.Attributes().PutStr("test_label", "test_value") + return m +} + +// copied from: https://github.com/prometheus/prometheus/blob/v3.5.0/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +func createOtelExponentialHistogram(name string, temporality pmetric.AggregationTemporality, ts time.Time) pmetric.Metric { + metrics := pmetric.NewMetricSlice() + m := metrics.AppendEmpty() + m.SetName(name) + hist := m.SetEmptyExponentialHistogram() + hist.SetAggregationTemporality(temporality) + dp := hist.DataPoints().AppendEmpty() + dp.SetCount(1) + dp.SetSum(5) + dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) + dp.Attributes().PutStr("test_label", "test_value") + return m +} + +// copied from: https://github.com/prometheus/prometheus/blob/v3.5.0/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +func createPromNativeHistogramSeries(name string, hint prompb.Histogram_ResetHint, ts time.Time) prompb.TimeSeries { + return prompb.TimeSeries{ + Labels: []prompb.Label{ + {Name: "__name__", Value: name}, + {Name: "test_label", Value: "test_value"}, + }, + Histograms: []prompb.Histogram{ + { + Count: &prompb.Histogram_CountInt{CountInt: 1}, + Sum: 5, + Schema: 0, + ZeroThreshold: 1e-128, + ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0}, + Timestamp: ts.UnixMilli(), + ResetHint: hint, + }, + }, + } +} + func TestOTLPConvertToPromTS(t *testing.T) { logger := log.NewNopLogger() ctx := context.Background() @@ -298,7 +620,7 @@ func getOTLPHttpRequest(otlpRequest *pmetricotlp.ExportRequest, contentType, enc return req, nil } -func BenchmarkOTLPWriteHandler(b *testing.B) { +func BenchmarkOTLPWriteHandlerCompression(b *testing.B) { cfg := distributor.OTLPConfig{ ConvertAllAttributes: false, DisableTargetInfo: false, @@ -315,9 +637,8 @@ func BenchmarkOTLPWriteHandler(b *testing.B) { req, err := getOTLPHttpRequest(&exportRequest, jsonContentType, "") require.NoError(b, err) - b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -330,9 +651,8 @@ func BenchmarkOTLPWriteHandler(b *testing.B) { req, err := getOTLPHttpRequest(&exportRequest, jsonContentType, "gzip") require.NoError(b, err) - b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -345,9 +665,8 @@ func BenchmarkOTLPWriteHandler(b *testing.B) { req, err := getOTLPHttpRequest(&exportRequest, pbContentType, "") require.NoError(b, err) - b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -360,9 +679,8 @@ func BenchmarkOTLPWriteHandler(b *testing.B) { req, err := getOTLPHttpRequest(&exportRequest, pbContentType, "gzip") require.NoError(b, err) - b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { recorder := 
httptest.NewRecorder() handler.ServeHTTP(recorder, req) @@ -373,6 +691,90 @@ func BenchmarkOTLPWriteHandler(b *testing.B) { }) } +func BenchmarkOTLPWriteHandlerPush(b *testing.B) { + cfg := distributor.OTLPConfig{ + ConvertAllAttributes: false, + DisableTargetInfo: false, + } + overrides := validation.NewOverrides(querier.DefaultLimitsConfig(), nil) + + mockPushFunc := func(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) { + return &cortexpb.WriteResponse{}, nil + } + handler := OTLPHandler(1000000, overrides, cfg, nil, mockPushFunc) + + tests := []struct { + description string + numSeries int + samplesPerSeries int + numHistograms int + }{ + { + numSeries: 1, + samplesPerSeries: 10, + numHistograms: 1, + }, + { + numSeries: 1, + samplesPerSeries: 100, + numHistograms: 1, + }, + { + numSeries: 1, + samplesPerSeries: 1000, + numHistograms: 1, + }, + { + numSeries: 1, + samplesPerSeries: 1, + numHistograms: 10, + }, + { + numSeries: 1, + samplesPerSeries: 1, + numHistograms: 100, + }, + { + numSeries: 1, + samplesPerSeries: 1, + numHistograms: 1000, + }, + { + numSeries: 10, + samplesPerSeries: 1, + numHistograms: 1, + }, + { + numSeries: 100, + samplesPerSeries: 1, + numHistograms: 1, + }, + { + numSeries: 1000, + samplesPerSeries: 1, + numHistograms: 1, + }, + } + + for _, test := range tests { + b.Run(fmt.Sprintf("numSeries:%d, samplesPerSeries:%d, numHistograms:%d", test.numSeries, test.samplesPerSeries, test.numHistograms), func(b *testing.B) { + exportRequest := generateOTLPWriteRequestWithSeries(test.numSeries, test.samplesPerSeries, test.numHistograms) + req, err := getOTLPHttpRequest(&exportRequest, pbContentType, "gzip") + require.NoError(b, err) + + b.ReportAllocs() + for b.Loop() { + recorder := httptest.NewRecorder() + handler.ServeHTTP(recorder, req) + + resp := recorder.Result() + require.Equal(b, http.StatusOK, resp.StatusCode) + req.Body.(*resetReader).Reset() + } + }) + } +} + func TestOTLPWriteHandler(t *testing.T) { cfg := distributor.OTLPConfig{ ConvertAllAttributes: false, @@ -478,6 +880,87 @@ func TestOTLPWriteHandler(t *testing.T) { } } +func generateOTLPWriteRequestWithSeries(numSeries, samplesPerSeries, numHistogram int) pmetricotlp.ExportRequest { + d := pmetric.NewMetrics() + + attributes := pcommon.NewMap() + attributes.PutStr("label1", "value1") + attributes.PutStr("label2", "value2") + attributes.PutStr("label3", "value3") + + for i := range numSeries { + metricName := fmt.Sprintf("series_%d", i) + metricUnit := fmt.Sprintf("unit_%d", i) + metricDescription := fmt.Sprintf("description_%d", i) + + resourceMetric := d.ResourceMetrics().AppendEmpty() + resourceMetric.Resource().Attributes().PutStr("service.name", "test-service") + resourceMetric.Resource().Attributes().PutStr("service.instance.id", "test-instance") + resourceMetric.Resource().Attributes().PutStr("host.name", "test-host") + + scopeMetric := resourceMetric.ScopeMetrics() + metric := scopeMetric.AppendEmpty().Metrics().AppendEmpty() + + // set metadata + metric.SetName(metricName) + metric.SetDescription(metricDescription) + metric.SetUnit(metricUnit) + metric.SetEmptyGauge() + + for j := range samplesPerSeries { + v := float64(j + i) + ts := time.Now().Add(time.Second * 30 * time.Duration(samplesPerSeries-j+1)) + dataPoint := metric.Gauge().DataPoints().AppendEmpty() + dataPoint.SetTimestamp(pcommon.NewTimestampFromTime(ts)) + dataPoint.SetDoubleValue(v) + attributes.CopyTo(dataPoint.Attributes()) + + // exemplar + exemplar := dataPoint.Exemplars().AppendEmpty() + 
exemplar.SetTimestamp(pcommon.NewTimestampFromTime(ts)) + exemplar.SetDoubleValue(v) + exemplar.SetSpanID(pcommon.SpanID{0, 1, 2, 3, 4, 5, 6, 7}) + exemplar.SetTraceID(pcommon.TraceID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}) + } + + for j := range numHistogram { + ts := time.Now().Add(time.Second * 30 * time.Duration(numHistogram-j+1)) + // Generate One Histogram + histogramMetric := scopeMetric.AppendEmpty().Metrics().AppendEmpty() + histogramMetric.SetName(fmt.Sprintf("test-histogram_%d", j)) + histogramMetric.SetDescription(fmt.Sprintf("test-histogram-description_%d", j)) + histogramMetric.SetEmptyHistogram() + histogramMetric.Histogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + + histogramDataPoint := histogramMetric.Histogram().DataPoints().AppendEmpty() + histogramDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(ts)) + histogramDataPoint.ExplicitBounds().FromRaw([]float64{0.0, 1.0, 2.0, 3.0, 4.0, 5.0}) + histogramDataPoint.BucketCounts().FromRaw([]uint64{2, 2, 2, 2, 2, 2}) + histogramDataPoint.SetCount(10) + histogramDataPoint.SetSum(30.0) + attributes.CopyTo(histogramDataPoint.Attributes()) + + // Generate One Exponential-Histogram + exponentialHistogramMetric := scopeMetric.AppendEmpty().Metrics().AppendEmpty() + exponentialHistogramMetric.SetName(fmt.Sprintf("test-exponential-histogram_%d", j)) + exponentialHistogramMetric.SetDescription(fmt.Sprintf("test-exponential-histogram-description_%d", j)) + exponentialHistogramMetric.SetEmptyExponentialHistogram() + exponentialHistogramMetric.ExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + + exponentialHistogramDataPoint := exponentialHistogramMetric.ExponentialHistogram().DataPoints().AppendEmpty() + exponentialHistogramDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(ts)) + exponentialHistogramDataPoint.SetScale(2.0) + exponentialHistogramDataPoint.Positive().BucketCounts().FromRaw([]uint64{2, 2, 2, 2, 2}) + exponentialHistogramDataPoint.SetZeroCount(2) + exponentialHistogramDataPoint.SetCount(10) + exponentialHistogramDataPoint.SetSum(30.0) + attributes.CopyTo(exponentialHistogramDataPoint.Attributes()) + } + } + + return pmetricotlp.NewExportRequestFromMetrics(d) +} + func generateOTLPWriteRequest() pmetricotlp.ExportRequest { d := pmetric.NewMetrics() diff --git a/pkg/util/push/push.go b/pkg/util/push/push.go index 9cabb39522..f380c4eb9b 100644 --- a/pkg/util/push/push.go +++ b/pkg/util/push/push.go @@ -2,22 +2,42 @@ package push import ( "context" + "fmt" "net/http" + "strconv" "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/exp/api/remote" + "github.com/prometheus/prometheus/model/labels" + writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" + "github.com/prometheus/prometheus/util/compression" "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/middleware" "github.com/cortexproject/cortex/pkg/cortexpb" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/extract" "github.com/cortexproject/cortex/pkg/util/log" ) +const ( + remoteWriteVersionHeader = "X-Prometheus-Remote-Write-Version" + remoteWriteVersion1HeaderValue = "0.1.0" + remoteWriteVersion20HeaderValue = "2.0.0" + appProtoContentType = "application/x-protobuf" + appProtoV1ContentType = "application/x-protobuf;proto=prometheus.WriteRequest" + appProtoV2ContentType = "application/x-protobuf;proto=io.prometheus.write.v2.Request" + + rw20WrittenSamplesHeader = 
"X-Prometheus-Remote-Write-Samples-Written" + rw20WrittenHistogramsHeader = "X-Prometheus-Remote-Write-Histograms-Written" + rw20WrittenExemplarsHeader = "X-Prometheus-Remote-Write-Exemplars-Written" +) + // Func defines the type of the push. It is similar to http.HandlerFunc. type Func func(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) // Handler is a http.Handler which accepts WriteRequests. -func Handler(maxRecvMsgSize int, sourceIPs *middleware.SourceIPExtractor, push Func) http.Handler { +func Handler(remoteWrite2Enabled bool, maxRecvMsgSize int, sourceIPs *middleware.SourceIPExtractor, push Func) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() logger := log.WithContext(ctx, log.Logger) @@ -28,31 +48,219 @@ func Handler(maxRecvMsgSize int, sourceIPs *middleware.SourceIPExtractor, push F logger = log.WithSourceIPs(source, logger) } } - var req cortexpb.PreallocWriteRequest - err := util.ParseProtoReader(ctx, r.Body, int(r.ContentLength), maxRecvMsgSize, &req, util.RawSnappy) - if err != nil { - level.Error(logger).Log("err", err.Error()) - http.Error(w, err.Error(), http.StatusBadRequest) - return + + handlePRW1 := func() { + var req cortexpb.PreallocWriteRequest + err := util.ParseProtoReader(ctx, r.Body, int(r.ContentLength), maxRecvMsgSize, &req, util.RawSnappy) + if err != nil { + level.Error(logger).Log("err", err.Error()) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + req.SkipLabelNameValidation = false + if req.Source == 0 { + req.Source = cortexpb.API + } + + if _, err := push(ctx, &req.WriteRequest); err != nil { + resp, ok := httpgrpc.HTTPResponseFromError(err) + if !ok { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if resp.GetCode()/100 == 5 { + level.Error(logger).Log("msg", "push error", "err", err) + } else if resp.GetCode() != http.StatusAccepted && resp.GetCode() != http.StatusTooManyRequests { + level.Warn(logger).Log("msg", "push refused", "err", err) + } + http.Error(w, string(resp.Body), int(resp.Code)) + } } - req.SkipLabelNameValidation = false - if req.Source == 0 { - req.Source = cortexpb.API + handlePRW2 := func() { + var req writev2.Request + err := util.ParseProtoReader(ctx, r.Body, int(r.ContentLength), maxRecvMsgSize, &req, util.RawSnappy) + if err != nil { + level.Error(logger).Log("err", err.Error()) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + v1Req, err := convertV2RequestToV1(&req) + if err != nil { + level.Error(logger).Log("err", err.Error()) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + v1Req.SkipLabelNameValidation = false + if v1Req.Source == 0 { + v1Req.Source = cortexpb.API + } + + if resp, err := push(ctx, &v1Req.WriteRequest); err != nil { + resp, ok := httpgrpc.HTTPResponseFromError(err) + setPRW2RespHeader(w, 0, 0, 0) + if !ok { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if resp.GetCode()/100 == 5 { + level.Error(logger).Log("msg", "push error", "err", err) + } else if resp.GetCode() != http.StatusAccepted && resp.GetCode() != http.StatusTooManyRequests { + level.Warn(logger).Log("msg", "push refused", "err", err) + } + http.Error(w, string(resp.Body), int(resp.Code)) + } else { + setPRW2RespHeader(w, resp.Samples, resp.Histograms, resp.Exemplars) + } } - if _, err := push(ctx, &req.WriteRequest); err != nil { - resp, ok := httpgrpc.HTTPResponseFromError(err) - if !ok { - http.Error(w, err.Error(), 
http.StatusInternalServerError) + if remoteWrite2Enabled { + // follow Prometheus https://github.com/prometheus/prometheus/blob/v3.3.1/storage/remote/write_handler.go#L121 + contentType := r.Header.Get("Content-Type") + if contentType == "" { + contentType = appProtoContentType + } + + msgType, err := remote.ParseProtoMsg(contentType) + if err != nil { + level.Error(logger).Log("msg", "error decoding remote write request", "err", err) + http.Error(w, err.Error(), http.StatusUnsupportedMediaType) + return + } + + if msgType != remote.WriteV1MessageType && msgType != remote.WriteV2MessageType { + // ParseProtoMsg succeeded, so err is nil here; build an explicit error instead of dereferencing it. + err := fmt.Errorf("%v is not an accepted remote write message type", msgType) + level.Error(logger).Log("msg", "not accepted msg type", "msgType", msgType, "err", err) + http.Error(w, err.Error(), http.StatusUnsupportedMediaType) + return + } + + // An empty Content-Encoding means an uncompressed body; anything other than snappy is rejected. + if enc := r.Header.Get("Content-Encoding"); enc != "" && enc != compression.Snappy { + err := fmt.Errorf("%v encoding (compression) is not accepted by this server; only %v is acceptable", enc, compression.Snappy) + level.Error(logger).Log("msg", "error decoding remote write request", "err", err) + http.Error(w, err.Error(), http.StatusUnsupportedMediaType) return - } - if resp.GetCode()/100 == 5 { - level.Error(logger).Log("msg", "push error", "err", err) - } else if resp.GetCode() != http.StatusAccepted && resp.GetCode() != http.StatusTooManyRequests { - level.Warn(logger).Log("msg", "push refused", "err", err) + + switch msgType { + case remote.WriteV1MessageType: + handlePRW1() + case remote.WriteV2MessageType: + handlePRW2() } - http.Error(w, string(resp.Body), int(resp.Code)) + } else { + handlePRW1() } }) } + +func setPRW2RespHeader(w http.ResponseWriter, samples, histograms, exemplars int64) { + w.Header().Set(rw20WrittenSamplesHeader, strconv.FormatInt(samples, 10)) + w.Header().Set(rw20WrittenHistogramsHeader, strconv.FormatInt(histograms, 10)) + w.Header().Set(rw20WrittenExemplarsHeader, strconv.FormatInt(exemplars, 10)) +} + +func convertV2RequestToV1(req *writev2.Request) (cortexpb.PreallocWriteRequest, error) { + var v1Req cortexpb.PreallocWriteRequest + v1Timeseries := make([]cortexpb.PreallocTimeseries, 0, len(req.Timeseries)) + var v1Metadata []*cortexpb.MetricMetadata + + b := labels.NewScratchBuilder(0) + symbols := req.Symbols + for _, v2Ts := range req.Timeseries { + lbs := v2Ts.ToLabels(&b, symbols) + v1Timeseries = append(v1Timeseries, cortexpb.PreallocTimeseries{ + TimeSeries: &cortexpb.TimeSeries{ + Labels: cortexpb.FromLabelsToLabelAdapters(lbs), + Samples: convertV2ToV1Samples(v2Ts.Samples), + Exemplars: convertV2ToV1Exemplars(b, symbols, v2Ts.Exemplars), + Histograms: convertV2ToV1Histograms(v2Ts.Histograms), + }, + }) + + if shouldConvertV2Metadata(v2Ts.Metadata) { + metricName, err := extract.MetricNameFromLabels(lbs) + if err != nil { + return v1Req, err + } + v1Metadata = append(v1Metadata, convertV2ToV1Metadata(metricName, symbols, v2Ts.Metadata)) + } + } + + v1Req.Timeseries = v1Timeseries + v1Req.Metadata = v1Metadata + + return v1Req, nil +} + +func shouldConvertV2Metadata(metadata writev2.Metadata) bool { + return !(metadata.HelpRef == 0 && metadata.UnitRef == 0 && metadata.Type == writev2.Metadata_METRIC_TYPE_UNSPECIFIED) //nolint:staticcheck +} + +func convertV2ToV1Histograms(histograms []writev2.Histogram) []cortexpb.Histogram { + v1Histograms := make([]cortexpb.Histogram, 0, len(histograms)) + + for _, h := range histograms { + v1Histograms = append(v1Histograms, cortexpb.HistogramWriteV2ProtoToHistogramProto(h)) + } + + return v1Histograms +} + +func convertV2ToV1Samples(samples []writev2.Sample)
[]cortexpb.Sample { + v1Samples := make([]cortexpb.Sample, 0, len(samples)) + + for _, s := range samples { + v1Samples = append(v1Samples, cortexpb.Sample{ + Value: s.Value, + TimestampMs: s.Timestamp, + }) + } + + return v1Samples +} + +func convertV2ToV1Metadata(name string, symbols []string, metadata writev2.Metadata) *cortexpb.MetricMetadata { + t := cortexpb.UNKNOWN + + switch metadata.Type { + case writev2.Metadata_METRIC_TYPE_COUNTER: + t = cortexpb.COUNTER + case writev2.Metadata_METRIC_TYPE_GAUGE: + t = cortexpb.GAUGE + case writev2.Metadata_METRIC_TYPE_HISTOGRAM: + t = cortexpb.HISTOGRAM + case writev2.Metadata_METRIC_TYPE_GAUGEHISTOGRAM: + t = cortexpb.GAUGEHISTOGRAM + case writev2.Metadata_METRIC_TYPE_SUMMARY: + t = cortexpb.SUMMARY + case writev2.Metadata_METRIC_TYPE_INFO: + t = cortexpb.INFO + case writev2.Metadata_METRIC_TYPE_STATESET: + t = cortexpb.STATESET + } + + return &cortexpb.MetricMetadata{ + Type: t, + MetricFamilyName: name, + Unit: symbols[metadata.UnitRef], + Help: symbols[metadata.HelpRef], + } +} + +func convertV2ToV1Exemplars(b labels.ScratchBuilder, symbols []string, v2Exemplars []writev2.Exemplar) []cortexpb.Exemplar { + v1Exemplars := make([]cortexpb.Exemplar, 0, len(v2Exemplars)) + for _, e := range v2Exemplars { + promExemplar := e.ToExemplar(&b, symbols) + v1Exemplars = append(v1Exemplars, cortexpb.Exemplar{ + Labels: cortexpb.FromLabelsToLabelAdapters(promExemplar.Labels), + Value: e.Value, + TimestampMs: e.Timestamp, + }) + } + return v1Exemplars +} diff --git a/pkg/util/push/push_test.go b/pkg/util/push/push_test.go index b806011a61..03d94d92dc 100644 --- a/pkg/util/push/push_test.go +++ b/pkg/util/push/push_test.go @@ -3,13 +3,17 @@ package push import ( "bytes" "context" + "fmt" "net/http" "net/http/httptest" "testing" "time" "github.com/golang/snappy" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/prompb" + writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" + "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/weaveworks/common/middleware" @@ -17,30 +21,371 @@ import ( "github.com/cortexproject/cortex/pkg/cortexpb" ) +var ( + testHistogram = histogram.Histogram{ + Schema: 2, + ZeroThreshold: 1e-128, + ZeroCount: 0, + Count: 3, + Sum: 20, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{1}, + NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}}, + NegativeBuckets: []int64{2}, + } +) + +func makeV2ReqWithSeries(num int) *writev2.Request { + ts := make([]writev2.TimeSeries, 0, num) + symbols := []string{"", "__name__", "test_metric1", "b", "c", "baz", "qux", "d", "e", "foo", "bar", "f", "g", "h", "i", "Test gauge for test purposes", "Maybe op/sec who knows (:", "Test counter for test purposes"} + for range num { + ts = append(ts, writev2.TimeSeries{ + LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + Metadata: writev2.Metadata{ + Type: writev2.Metadata_METRIC_TYPE_GAUGE, + + HelpRef: 15, + UnitRef: 16, + }, + Samples: []writev2.Sample{{Value: 1, Timestamp: 10}}, + Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{11, 12}, Value: 1, Timestamp: 10}}, + Histograms: []writev2.Histogram{ + writev2.FromIntHistogram(10, &testHistogram), + writev2.FromFloatHistogram(20, testHistogram.ToFloat(nil)), + }, + }) + } + + return &writev2.Request{ + Symbols: symbols, + Timeseries: ts, + } +} + +func createPRW1HTTPRequest(seriesNum int) (*http.Request, error) { + series := 
makeV2ReqWithSeries(seriesNum) + v1Req, err := convertV2RequestToV1(series) + if err != nil { + return nil, err + } + protobuf, err := v1Req.Marshal() + if err != nil { + return nil, err + } + + body := snappy.Encode(nil, protobuf) + req, err := http.NewRequest("POST", "http://localhost/", newResetReader(body)) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Encoding", "snappy") + req.Header.Set("Content-Type", appProtoContentType) + req.Header.Set("X-Prometheus-Remote-Write-Version", remoteWriteVersion1HeaderValue) + req.ContentLength = int64(len(body)) + return req, nil +} + +func createPRW2HTTPRequest(seriesNum int) (*http.Request, error) { + series := makeV2ReqWithSeries(seriesNum) + protobuf, err := series.Marshal() + if err != nil { + return nil, err + } + + body := snappy.Encode(nil, protobuf) + req, err := http.NewRequest("POST", "http://localhost/", newResetReader(body)) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Encoding", "snappy") + req.Header.Set("Content-Type", appProtoV2ContentType) + req.Header.Set("X-Prometheus-Remote-Write-Version", remoteWriteVersion20HeaderValue) + req.ContentLength = int64(len(body)) + return req, nil +} + +func Benchmark_Handler(b *testing.B) { + mockHandler := func(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) { + // Nothing to do. + return &cortexpb.WriteResponse{}, nil + } + testSeriesNums := []int{10, 100, 500, 1000} + for _, seriesNum := range testSeriesNums { + b.Run(fmt.Sprintf("PRW1 with %d series", seriesNum), func(b *testing.B) { + handler := Handler(true, 1000000, nil, mockHandler) + req, err := createPRW1HTTPRequest(seriesNum) + require.NoError(b, err) + + b.ReportAllocs() + + for b.Loop() { + resp := httptest.NewRecorder() + handler.ServeHTTP(resp, req) + assert.Equal(b, http.StatusOK, resp.Code) + req.Body.(*resetReader).Reset() + } + }) + b.Run(fmt.Sprintf("PRW2 with %d series", seriesNum), func(b *testing.B) { + handler := Handler(true, 1000000, nil, mockHandler) + req, err := createPRW2HTTPRequest(seriesNum) + require.NoError(b, err) + + b.ReportAllocs() + + for b.Loop() { + resp := httptest.NewRecorder() + handler.ServeHTTP(resp, req) + assert.Equal(b, http.StatusOK, resp.Code) + req.Body.(*resetReader).Reset() + } + }) + } +} + +func Benchmark_convertV2RequestToV1(b *testing.B) { + testSeriesNums := []int{100, 500, 1000} + + for _, seriesNum := range testSeriesNums { + b.Run(fmt.Sprintf("%d series", seriesNum), func(b *testing.B) { + series := makeV2ReqWithSeries(seriesNum) + + b.ReportAllocs() + for b.Loop() { + _, err := convertV2RequestToV1(series) + require.NoError(b, err) + } + }) + } +} + +func Test_convertV2RequestToV1(t *testing.T) { + var v2Req writev2.Request + + fh := tsdbutil.GenerateTestFloatHistogram(1) + ph := writev2.FromFloatHistogram(4, fh) + + symbols := []string{"", "__name__", "test_metric", "b", "c", "baz", "qux", "d", "e", "foo", "bar", "f", "g", "h", "i", "Test gauge for test purposes", "Maybe op/sec who knows (:", "Test counter for test purposes"} + timeseries := []writev2.TimeSeries{ + { + LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + Metadata: writev2.Metadata{ + Type: writev2.Metadata_METRIC_TYPE_COUNTER, + + HelpRef: 15, + UnitRef: 16, + }, + Samples: []writev2.Sample{{Value: 1, Timestamp: 1}}, + Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{11, 12}, Value: 1, Timestamp: 1}}, + }, + { + LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + Samples: []writev2.Sample{{Value: 2, Timestamp: 2}}, + }, + { + LabelsRefs: 
[]uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + Samples: []writev2.Sample{{Value: 3, Timestamp: 3}}, + }, + { + LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + Histograms: []writev2.Histogram{ph, ph}, + Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{11, 12}, Value: 1, Timestamp: 1}}, + }, + } + + v2Req.Symbols = symbols + v2Req.Timeseries = timeseries + v1Req, err := convertV2RequestToV1(&v2Req) + assert.NoError(t, err) + expectedSamples := 3 + expectedExemplars := 2 + expectedHistograms := 2 + countSamples := 0 + countExemplars := 0 + countHistograms := 0 + + for _, ts := range v1Req.Timeseries { + countSamples += len(ts.Samples) + countExemplars += len(ts.Exemplars) + countHistograms += len(ts.Histograms) + } + + assert.Equal(t, expectedSamples, countSamples) + assert.Equal(t, expectedExemplars, countExemplars) + assert.Equal(t, expectedHistograms, countHistograms) + assert.Equal(t, 4, len(v1Req.Timeseries)) + assert.Equal(t, 1, len(v1Req.Metadata)) +} + func TestHandler_remoteWrite(t *testing.T) { - req := createRequest(t, createPrometheusRemoteWriteProtobuf(t)) - resp := httptest.NewRecorder() - handler := Handler(100000, nil, verifyWriteRequestHandler(t, cortexpb.API)) - handler.ServeHTTP(resp, req) - assert.Equal(t, 200, resp.Code) + t.Run("remote write v1", func(t *testing.T) { + handler := Handler(true, 100000, nil, verifyWriteRequestHandler(t, cortexpb.API)) + req := createRequest(t, createPrometheusRemoteWriteProtobuf(t), false) + resp := httptest.NewRecorder() + handler.ServeHTTP(resp, req) + assert.Equal(t, http.StatusOK, resp.Code) + }) + t.Run("remote write v2", func(t *testing.T) { + handler := Handler(true, 100000, nil, verifyWriteRequestHandler(t, cortexpb.API)) + req := createRequest(t, createPrometheusRemoteWriteV2Protobuf(t), true) + resp := httptest.NewRecorder() + handler.ServeHTTP(resp, req) + assert.Equal(t, http.StatusOK, resp.Code) + + // test header value + respHeader := resp.Header() + assert.Equal(t, "1", respHeader[rw20WrittenSamplesHeader][0]) + assert.Equal(t, "1", respHeader[rw20WrittenHistogramsHeader][0]) + assert.Equal(t, "1", respHeader[rw20WrittenExemplarsHeader][0]) + }) +} + +func TestHandler_ContentTypeAndEncoding(t *testing.T) { + sourceIPs, _ := middleware.NewSourceIPs("SomeField", "(.*)") + handler := Handler(true, 100000, sourceIPs, verifyWriteRequestHandler(t, cortexpb.API)) + + tests := []struct { + description string + reqHeaders map[string]string + expectedCode int + isV2 bool + }{ + { + description: "[RW 2.0] correct content-type", + reqHeaders: map[string]string{ + "Content-Type": appProtoV2ContentType, + "Content-Encoding": "snappy", + remoteWriteVersionHeader: "2.0.0", + }, + expectedCode: http.StatusOK, + isV2: true, + }, + { + description: "[RW 1.0] correct content-type", + reqHeaders: map[string]string{ + "Content-Type": appProtoV1ContentType, + "Content-Encoding": "snappy", + remoteWriteVersionHeader: "0.1.0", + }, + expectedCode: http.StatusOK, + isV2: false, + }, + { + description: "[RW 2.0] wrong content-type", + reqHeaders: map[string]string{ + "Content-Type": "yolo", + "Content-Encoding": "snappy", + remoteWriteVersionHeader: "2.0.0", + }, + expectedCode: http.StatusUnsupportedMediaType, + isV2: true, + }, + { + description: "[RW 2.0] wrong content-type", + reqHeaders: map[string]string{ + "Content-Type": "application/x-protobuf;proto=yolo", + "Content-Encoding": "snappy", + remoteWriteVersionHeader: "2.0.0", + }, + expectedCode: http.StatusUnsupportedMediaType, + isV2: true, + }, + { + description: "[RW 2.0] wrong 
content-encoding", + reqHeaders: map[string]string{ + "Content-Type": "application/x-protobuf;proto=io.prometheus.write.v2.Request", + "Content-Encoding": "zstd", + remoteWriteVersionHeader: "2.0.0", + }, + expectedCode: http.StatusUnsupportedMediaType, + isV2: true, + }, + { + description: "no header, should treated as RW 1.0", + expectedCode: http.StatusOK, + isV2: false, + }, + { + description: "missing content-type, should treated as RW 1.0", + reqHeaders: map[string]string{ + "Content-Encoding": "snappy", + remoteWriteVersionHeader: "2.0.0", + }, + expectedCode: http.StatusOK, + isV2: false, + }, + { + description: "missing content-encoding", + reqHeaders: map[string]string{ + "Content-Type": appProtoV2ContentType, + remoteWriteVersionHeader: "2.0.0", + }, + expectedCode: http.StatusOK, + isV2: true, + }, + { + description: "missing remote write version, should treated based on Content-type", + reqHeaders: map[string]string{ + "Content-Type": appProtoV2ContentType, + "Content-Encoding": "snappy", + }, + expectedCode: http.StatusOK, + isV2: true, + }, + { + description: "missing remote write version, should treated based on Content-type", + reqHeaders: map[string]string{ + "Content-Type": appProtoV1ContentType, + "Content-Encoding": "snappy", + }, + expectedCode: http.StatusOK, + isV2: false, + }, + } + + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + if test.isV2 { + req := createRequestWithHeaders(t, test.reqHeaders, createCortexRemoteWriteV2Protobuf(t, false, cortexpb.API)) + resp := httptest.NewRecorder() + handler.ServeHTTP(resp, req) + assert.Equal(t, test.expectedCode, resp.Code) + } else { + req := createRequestWithHeaders(t, test.reqHeaders, createCortexWriteRequestProtobuf(t, false, cortexpb.API)) + resp := httptest.NewRecorder() + handler.ServeHTTP(resp, req) + assert.Equal(t, test.expectedCode, resp.Code) + } + }) + } } func TestHandler_cortexWriteRequest(t *testing.T) { - req := createRequest(t, createCortexWriteRequestProtobuf(t, false)) - resp := httptest.NewRecorder() sourceIPs, _ := middleware.NewSourceIPs("SomeField", "(.*)") - handler := Handler(100000, sourceIPs, verifyWriteRequestHandler(t, cortexpb.RULE)) - handler.ServeHTTP(resp, req) - assert.Equal(t, 200, resp.Code) + handler := Handler(true, 100000, sourceIPs, verifyWriteRequestHandler(t, cortexpb.API)) + + t.Run("remote write v1", func(t *testing.T) { + req := createRequest(t, createCortexWriteRequestProtobuf(t, false, cortexpb.API), false) + resp := httptest.NewRecorder() + handler.ServeHTTP(resp, req) + assert.Equal(t, 200, resp.Code) + }) + t.Run("remote write v2", func(t *testing.T) { + req := createRequest(t, createCortexRemoteWriteV2Protobuf(t, false, cortexpb.API), true) + resp := httptest.NewRecorder() + handler.ServeHTTP(resp, req) + assert.Equal(t, 200, resp.Code) + }) } func TestHandler_ignoresSkipLabelNameValidationIfSet(t *testing.T) { for _, req := range []*http.Request{ - createRequest(t, createCortexWriteRequestProtobuf(t, true)), - createRequest(t, createCortexWriteRequestProtobuf(t, false)), + createRequest(t, createCortexWriteRequestProtobuf(t, true, cortexpb.RULE), false), + createRequest(t, createCortexWriteRequestProtobuf(t, true, cortexpb.RULE), false), } { resp := httptest.NewRecorder() - handler := Handler(100000, nil, verifyWriteRequestHandler(t, cortexpb.RULE)) + handler := Handler(true, 100000, nil, verifyWriteRequestHandler(t, cortexpb.RULE)) handler.ServeHTTP(resp, req) assert.Equal(t, 200, resp.Code) } @@ -54,21 +399,86 @@ func 
verifyWriteRequestHandler(t *testing.T, expectSource cortexpb.WriteRequest_ assert.Equal(t, "foo", request.Timeseries[0].Labels[0].Value) assert.Equal(t, expectSource, request.Source) assert.False(t, request.SkipLabelNameValidation) - return &cortexpb.WriteResponse{}, nil + + resp := &cortexpb.WriteResponse{ + Samples: 1, + Histograms: 1, + Exemplars: 1, + } + + return resp, nil } } -func createRequest(t *testing.T, protobuf []byte) *http.Request { +func createRequestWithHeaders(t *testing.T, headers map[string]string, protobuf []byte) *http.Request { t.Helper() inoutBytes := snappy.Encode(nil, protobuf) req, err := http.NewRequest("POST", "http://localhost/", bytes.NewReader(inoutBytes)) require.NoError(t, err) + + for k, v := range headers { + req.Header.Set(k, v) + } + return req +} + +func createRequest(t *testing.T, protobuf []byte, isV2 bool) *http.Request { + t.Helper() + inoutBytes := snappy.Encode(nil, protobuf) + req, err := http.NewRequest("POST", "http://localhost/", bytes.NewReader(inoutBytes)) + require.NoError(t, err) + req.Header.Add("Content-Encoding", "snappy") - req.Header.Set("Content-Type", "application/x-protobuf") - req.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0") + + if isV2 { + req.Header.Set("Content-Type", appProtoV2ContentType) + req.Header.Set("X-Prometheus-Remote-Write-Version", remoteWriteVersion20HeaderValue) + return req + } + + req.Header.Set("Content-Type", appProtoContentType) + req.Header.Set("X-Prometheus-Remote-Write-Version", remoteWriteVersion1HeaderValue) return req } +func createCortexRemoteWriteV2Protobuf(t *testing.T, skipLabelNameValidation bool, source cortexpb.WriteRequest_SourceEnum) []byte { + t.Helper() + input := writev2.Request{ + Symbols: []string{"", "__name__", "foo"}, + Timeseries: []writev2.TimeSeries{ + { + LabelsRefs: []uint32{1, 2}, + Samples: []writev2.Sample{ + {Value: 1, Timestamp: time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC).UnixNano()}, + }, + }, + }, + } + + inoutBytes, err := input.Marshal() + require.NoError(t, err) + return inoutBytes +} + +func createPrometheusRemoteWriteV2Protobuf(t *testing.T) []byte { + t.Helper() + input := writev2.Request{ + Symbols: []string{"", "__name__", "foo"}, + Timeseries: []writev2.TimeSeries{ + { + LabelsRefs: []uint32{1, 2}, + Samples: []writev2.Sample{ + {Value: 1, Timestamp: time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC).UnixNano()}, + }, + }, + }, + } + + inoutBytes, err := input.Marshal() + require.NoError(t, err) + return inoutBytes +} + func createPrometheusRemoteWriteProtobuf(t *testing.T) []byte { t.Helper() input := prompb.WriteRequest{ @@ -87,7 +497,7 @@ func createPrometheusRemoteWriteProtobuf(t *testing.T) []byte { require.NoError(t, err) return inoutBytes } -func createCortexWriteRequestProtobuf(t *testing.T, skipLabelNameValidation bool) []byte { +func createCortexWriteRequestProtobuf(t *testing.T, skipLabelNameValidation bool, source cortexpb.WriteRequest_SourceEnum) []byte { t.Helper() ts := cortexpb.PreallocTimeseries{ TimeSeries: &cortexpb.TimeSeries{ @@ -101,7 +511,7 @@ func createCortexWriteRequestProtobuf(t *testing.T, skipLabelNameValidation bool } input := cortexpb.WriteRequest{ Timeseries: []cortexpb.PreallocTimeseries{ts}, - Source: cortexpb.RULE, + Source: source, SkipLabelNameValidation: skipLabelNameValidation, } inoutBytes, err := input.Marshal() diff --git a/pkg/util/requestmeta/context.go b/pkg/util/requestmeta/context.go new file mode 100644 index 0000000000..43ee33c7bc --- /dev/null +++ b/pkg/util/requestmeta/context.go @@ -0,0 +1,76 @@ +package 
requestmeta + +import ( + "context" + "net/http" + "net/textproto" + + "google.golang.org/grpc/metadata" +) + +type contextKey int + +const ( + requestMetadataContextKey contextKey = 0 + PropagationStringForRequestMetadata string = "x-request-metadata-propagation-string" + // HeaderPropagationStringForRequestLogging is used for backwards compatibility + HeaderPropagationStringForRequestLogging string = "x-http-header-forwarding-logging" +) + +func ContextWithRequestMetadataMap(ctx context.Context, requestContextMap map[string]string) context.Context { + return context.WithValue(ctx, requestMetadataContextKey, requestContextMap) +} + +func MapFromContext(ctx context.Context) map[string]string { + requestContextMap, ok := ctx.Value(requestMetadataContextKey).(map[string]string) + if !ok { + return nil + } + return requestContextMap +} + +// ContextWithRequestMetadataMapFromHeaders builds a request-metadata map from the given headers and attaches it to the context; header keys that are not present in headers are skipped. +// targetHeaders is passed for backwards compatibility; otherwise the header keys to forward are expected to be listed in the headers themselves under LoggingHeadersKey. +func ContextWithRequestMetadataMapFromHeaders(ctx context.Context, headers map[string]string, targetHeaders []string) context.Context { + headerMap := make(map[string]string) + loggingHeaders := headers[textproto.CanonicalMIMEHeaderKey(LoggingHeadersKey)] + headerKeys := targetHeaders + if loggingHeaders != "" { + headerKeys = LoggingHeaderKeysFromString(loggingHeaders) + headerKeys = append(headerKeys, LoggingHeadersKey) + } + headerKeys = append(headerKeys, RequestIdKey) + headerKeys = append(headerKeys, RequestSourceKey) + for _, header := range headerKeys { + if v, ok := headers[textproto.CanonicalMIMEHeaderKey(header)]; ok { + headerMap[header] = v + } + } + return ContextWithRequestMetadataMap(ctx, headerMap) +} + +func InjectMetadataIntoHTTPRequestHeaders(requestMetadataMap map[string]string, request *http.Request) { + for key, contents := range requestMetadataMap { + request.Header.Add(key, contents) + } +} + +func ContextWithRequestMetadataMapFromMetadata(ctx context.Context, md metadata.MD) context.Context { + headersSlice, ok := md[PropagationStringForRequestMetadata] + + // fall back to the old propagation key if the new one carries no data + if !ok { + headersSlice, ok = md[HeaderPropagationStringForRequestLogging] + } + + // the slice alternates keys and values, so an odd length means malformed metadata + if !ok || len(headersSlice)%2 == 1 { + return ctx + } + + requestMetadataMap := make(map[string]string) + for i := 0; i < len(headersSlice); i += 2 { + requestMetadataMap[headersSlice[i]] = headersSlice[i+1] + } + + return ContextWithRequestMetadataMap(ctx, requestMetadataMap) +} diff --git a/pkg/util/requestmeta/context_test.go b/pkg/util/requestmeta/context_test.go new file mode 100644 index 0000000000..23a0d3b4da --- /dev/null +++ b/pkg/util/requestmeta/context_test.go @@ -0,0 +1,113 @@ +package requestmeta + +import ( + "context" + "net/http" + "net/textproto" + "testing" + + "github.com/stretchr/testify/require" + "google.golang.org/grpc/metadata" +) + +func TestRequestMetadataMapFromMetadata(t *testing.T) { + md := metadata.New(nil) + md.Append(PropagationStringForRequestMetadata, "TestHeader1", "SomeInformation", "TestHeader2", "ContentsOfTestHeader2") + + ctx := context.Background() + + ctx = ContextWithRequestMetadataMapFromMetadata(ctx, md) + + requestMetadataMap := MapFromContext(ctx) + + require.Contains(t, requestMetadataMap, "TestHeader1") + require.Contains(t, requestMetadataMap, "TestHeader2") + require.Equal(t, "SomeInformation", requestMetadataMap["TestHeader1"]) + require.Equal(t, "ContentsOfTestHeader2",
requestMetadataMap["TestHeader2"]) +} + +func TestRequestMetadataMapFromMetadataWithImproperLength(t *testing.T) { + md := metadata.New(nil) + md.Append(PropagationStringForRequestMetadata, "TestHeader1", "SomeInformation", "TestHeader2", "ContentsOfTestHeader2", "Test3") + + ctx := context.Background() + + ctx = ContextWithRequestMetadataMapFromMetadata(ctx, md) + + requestMetadataMap := MapFromContext(ctx) + require.Nil(t, requestMetadataMap) +} + +func TestContextWithRequestMetadataMapFromHeaders_WithLoggingHeaders(t *testing.T) { + headers := map[string]string{ + textproto.CanonicalMIMEHeaderKey("X-Request-ID"): "1234", + textproto.CanonicalMIMEHeaderKey("X-User-ID"): "user5678", + textproto.CanonicalMIMEHeaderKey(LoggingHeadersKey): "X-Request-ID,X-User-ID", + } + + ctx := context.Background() + ctx = ContextWithRequestMetadataMapFromHeaders(ctx, headers, nil) + + requestMetadataMap := MapFromContext(ctx) + + require.Contains(t, requestMetadataMap, "X-Request-ID") + require.Contains(t, requestMetadataMap, "X-User-ID") + require.Equal(t, "1234", requestMetadataMap["X-Request-ID"]) + require.Equal(t, "user5678", requestMetadataMap["X-User-ID"]) +} + +func TestContextWithRequestMetadataMapFromHeaders_BackwardCompatibleTargetHeaders(t *testing.T) { + headers := map[string]string{ + textproto.CanonicalMIMEHeaderKey("X-Legacy-Header"): "legacy-value", + } + + ctx := context.Background() + ctx = ContextWithRequestMetadataMapFromHeaders(ctx, headers, []string{"X-Legacy-Header"}) + + requestMetadataMap := MapFromContext(ctx) + + require.Contains(t, requestMetadataMap, "X-Legacy-Header") + require.Equal(t, "legacy-value", requestMetadataMap["X-Legacy-Header"]) +} + +func TestContextWithRequestMetadataMapFromHeaders_OnlyMatchingKeysUsed(t *testing.T) { + headers := map[string]string{ + textproto.CanonicalMIMEHeaderKey("X-Some-Header"): "value1", + textproto.CanonicalMIMEHeaderKey("Unused-Header"): "value2", + textproto.CanonicalMIMEHeaderKey(LoggingHeadersKey): "X-Some-Header", + } + + ctx := context.Background() + ctx = ContextWithRequestMetadataMapFromHeaders(ctx, headers, nil) + + requestMetadataMap := MapFromContext(ctx) + + require.Equal(t, "value1", requestMetadataMap["X-Some-Header"]) +} + +func TestInjectMetadataIntoHTTPRequestHeaders(t *testing.T) { + contentsMap := make(map[string]string) + contentsMap["TestHeader1"] = "RequestID" + contentsMap["TestHeader2"] = "ContentsOfTestHeader2" + + h := http.Header{} + req := &http.Request{ + Method: "GET", + RequestURI: "/HTTPHeaderTest", + Body: http.NoBody, + Header: h, + } + InjectMetadataIntoHTTPRequestHeaders(contentsMap, req) + + header1 := req.Header.Values("TestHeader1") + header2 := req.Header.Values("TestHeader2") + + require.NotNil(t, header1) + require.NotNil(t, header2) + require.Equal(t, 1, len(header1)) + require.Equal(t, 1, len(header2)) + + require.Equal(t, "RequestID", header1[0]) + require.Equal(t, "ContentsOfTestHeader2", header2[0]) + +} diff --git a/pkg/util/requestmeta/id.go b/pkg/util/requestmeta/id.go new file mode 100644 index 0000000000..01b34e430a --- /dev/null +++ b/pkg/util/requestmeta/id.go @@ -0,0 +1,22 @@ +package requestmeta + +import "context" + +const RequestIdKey = "x-cortex-request-id" + +func RequestIdFromContext(ctx context.Context) string { + metadataMap := MapFromContext(ctx) + if metadataMap == nil { + return "" + } + return metadataMap[RequestIdKey] +} + +func ContextWithRequestId(ctx context.Context, reqId string) context.Context { + metadataMap := MapFromContext(ctx) + if metadataMap == nil { + 
metadataMap = make(map[string]string) + } + metadataMap[RequestIdKey] = reqId + return ContextWithRequestMetadataMap(ctx, metadataMap) +} diff --git a/pkg/util/requestmeta/logging_headers.go b/pkg/util/requestmeta/logging_headers.go new file mode 100644 index 0000000000..02b2a4270b --- /dev/null +++ b/pkg/util/requestmeta/logging_headers.go @@ -0,0 +1,60 @@ +package requestmeta + +import ( + "context" + "strings" +) + +const ( + LoggingHeadersKey = "x-request-logging-headers-key" + loggingHeadersDelimiter = "," +) + +func LoggingHeaderKeysToString(targetHeaders []string) string { + return strings.Join(targetHeaders, loggingHeadersDelimiter) +} + +func LoggingHeaderKeysFromString(headerKeysString string) []string { + return strings.Split(headerKeysString, loggingHeadersDelimiter) +} + +func LoggingHeadersFromContext(ctx context.Context) map[string]string { + metadataMap := MapFromContext(ctx) + if metadataMap == nil { + return nil + } + loggingHeadersString := metadataMap[LoggingHeadersKey] + if loggingHeadersString == "" { + // Backward compatibility: if no specific headers are listed, return all metadata excluding requestId and source + result := make(map[string]string, len(metadataMap)) + for k, v := range metadataMap { + if k == RequestIdKey || k == RequestSourceKey { + continue + } + result[k] = v + } + return result + } + + result := make(map[string]string) + for _, header := range LoggingHeaderKeysFromString(loggingHeadersString) { + if v, ok := metadataMap[header]; ok { + result[header] = v + } + } + return result +} + +func LoggingHeadersAndRequestIdFromContext(ctx context.Context) map[string]string { + metadataMap := MapFromContext(ctx) + if metadataMap == nil { + return nil + } + + loggingHeaders := LoggingHeadersFromContext(ctx) + if reqId := RequestIdFromContext(ctx); reqId != "" { + loggingHeaders[RequestIdKey] = reqId + } + + return loggingHeaders +} diff --git a/pkg/util/requestmeta/source.go b/pkg/util/requestmeta/source.go new file mode 100644 index 0000000000..6f0f23db06 --- /dev/null +++ b/pkg/util/requestmeta/source.go @@ -0,0 +1,27 @@ +package requestmeta + +import "context" + +const RequestSourceKey = "x-cortex-request-source" + +const ( + SourceAPI = "api" + SourceRuler = "ruler" +) + +func ContextWithRequestSource(ctx context.Context, source string) context.Context { + metadataMap := MapFromContext(ctx) + if metadataMap == nil { + metadataMap = make(map[string]string) + } + metadataMap[RequestSourceKey] = source + return ContextWithRequestMetadataMap(ctx, metadataMap) +} + +func RequestFromRuler(ctx context.Context) bool { + metadataMap := MapFromContext(ctx) + if metadataMap == nil { + return false + } + return metadataMap[RequestSourceKey] == SourceRuler +} diff --git a/pkg/util/runtimeconfig/manager.go b/pkg/util/runtimeconfig/manager.go index f4bff920ec..7479f5cdca 100644 --- a/pkg/util/runtimeconfig/manager.go +++ b/pkg/util/runtimeconfig/manager.go @@ -24,7 +24,7 @@ import ( type BucketClientFactory func(ctx context.Context) (objstore.Bucket, error) // Loader loads the configuration from file. -type Loader func(r io.Reader) (interface{}, error) +type Loader func(r io.Reader) (any, error) // Config holds the config for an Manager instance. // It holds config related to loading per-tenant config. 
@@ -55,10 +55,10 @@ type Manager struct { logger log.Logger listenersMtx sync.Mutex - listeners []chan interface{} + listeners []chan any configMtx sync.RWMutex - config interface{} + config any configLoadSuccess prometheus.Gauge configHash *prometheus.GaugeVec @@ -115,8 +115,8 @@ func (om *Manager) starting(ctx context.Context) error { // // When config manager is stopped, it closes all channels to notify receivers that they will // not receive any more updates. -func (om *Manager) CreateListenerChannel(buffer int) <-chan interface{} { - ch := make(chan interface{}, buffer) +func (om *Manager) CreateListenerChannel(buffer int) <-chan any { + ch := make(chan any, buffer) om.listenersMtx.Lock() defer om.listenersMtx.Unlock() @@ -126,7 +126,7 @@ func (om *Manager) CreateListenerChannel(buffer int) <-chan interface{} { } // CloseListenerChannel removes given channel from list of channels to send notifications to and closes channel. -func (om *Manager) CloseListenerChannel(listener <-chan interface{}) { +func (om *Manager) CloseListenerChannel(listener <-chan any) { om.listenersMtx.Lock() defer om.listenersMtx.Unlock() @@ -205,13 +205,13 @@ func (om *Manager) loadConfigFromBucket(ctx context.Context) ([]byte, error) { return buf, err } -func (om *Manager) setConfig(config interface{}) { +func (om *Manager) setConfig(config any) { om.configMtx.Lock() defer om.configMtx.Unlock() om.config = config } -func (om *Manager) callListeners(newValue interface{}) { +func (om *Manager) callListeners(newValue any) { om.listenersMtx.Lock() defer om.listenersMtx.Unlock() @@ -238,7 +238,7 @@ func (om *Manager) stopping(_ error) error { } // GetConfig returns last loaded config value, possibly nil. -func (om *Manager) GetConfig() interface{} { +func (om *Manager) GetConfig() any { om.configMtx.RLock() defer om.configMtx.RUnlock() diff --git a/pkg/util/runtimeconfig/manager_test.go b/pkg/util/runtimeconfig/manager_test.go index d68056e0fd..df14986a61 100644 --- a/pkg/util/runtimeconfig/manager_test.go +++ b/pkg/util/runtimeconfig/manager_test.go @@ -39,7 +39,7 @@ type testOverrides struct { } // UnmarshalYAML implements the yaml.Unmarshaler interface. 
-func (l *TestLimits) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (l *TestLimits) UnmarshalYAML(unmarshal func(any) error) error { if defaultTestLimits != nil { *l = *defaultTestLimits } @@ -47,7 +47,7 @@ func (l *TestLimits) UnmarshalYAML(unmarshal func(interface{}) error) error { return unmarshal((*plain)(l)) } -func testLoadOverrides(r io.Reader) (interface{}, error) { +func testLoadOverrides(r io.Reader) (any, error) { var overrides = &testOverrides{} decoder := yaml.NewDecoder(r) @@ -74,7 +74,7 @@ func newTestOverridesManagerConfig(t *testing.T, i int32) (*atomic.Int32, Config return config, Config{ ReloadPeriod: 5 * time.Second, LoadPath: tempFile.Name(), - Loader: func(_ io.Reader) (i interface{}, err error) { + Loader: func(_ io.Reader) (i any, err error) { val := int(config.Load()) return val, nil }, @@ -181,7 +181,7 @@ func TestManager_ListenerWithDefaultLimits(t *testing.T) { err = overridesManager.loadConfig(context.TODO()) require.NoError(t, err) - var newValue interface{} + var newValue any select { case newValue = <-ch: // ok diff --git a/pkg/util/runutil/runutil.go b/pkg/util/runutil/runutil.go index 421f774276..b8303e05b3 100644 --- a/pkg/util/runutil/runutil.go +++ b/pkg/util/runutil/runutil.go @@ -20,7 +20,7 @@ func CloseWithErrCapture(err *error, closer io.Closer, msg string) { // CloseWithLogOnErr closes an io.Closer and logs any relevant error from it wrapped with the provided format string and // args. -func CloseWithLogOnErr(logger log.Logger, closer io.Closer, format string, args ...interface{}) { +func CloseWithLogOnErr(logger log.Logger, closer io.Closer, format string, args ...any) { err := closer.Close() if err == nil || errors.Is(err, os.ErrClosed) { return diff --git a/pkg/util/runutil/runutil_test.go b/pkg/util/runutil/runutil_test.go index b2392185c2..13b83b0e32 100644 --- a/pkg/util/runutil/runutil_test.go +++ b/pkg/util/runutil/runutil_test.go @@ -16,7 +16,7 @@ func TestCloseWithLogOnErr(t *testing.T) { CloseWithLogOnErr(&logger, closer, "closing failed") - assert.Equal(t, []interface{}{ + assert.Equal(t, []any{ "level", level.WarnValue(), "msg", "detected close error", "err", "closing failed: an error", }, logger.keyvals) }) @@ -49,10 +49,10 @@ func (c fakeCloser) Close() error { } type fakeLogger struct { - keyvals []interface{} + keyvals []any } -func (l *fakeLogger) Log(keyvals ...interface{}) error { +func (l *fakeLogger) Log(keyvals ...any) error { l.keyvals = keyvals return nil } diff --git a/pkg/util/services/basic_service_test.go b/pkg/util/services/basic_service_test.go index 0856376a5d..8a12268d9a 100644 --- a/pkg/util/services/basic_service_test.go +++ b/pkg/util/services/basic_service_test.go @@ -318,8 +318,7 @@ func TestServiceName(t *testing.T) { s := NewIdleService(nil, nil).WithName("test name") require.Equal(t, "test name", DescribeService(s)) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() require.NoError(t, s.StartAsync(ctx)) // once service has started, BasicService will not allow changing the name diff --git a/pkg/util/spanlogger/noop.go b/pkg/util/spanlogger/noop.go index 8c7480ec89..72943361a7 100644 --- a/pkg/util/spanlogger/noop.go +++ b/pkg/util/spanlogger/noop.go @@ -25,15 +25,15 @@ func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {} func (n noopSpan) Context() opentracing.SpanContext { return defaultNoopSpanContext } func (n noopSpan) SetBaggageItem(key, val string) opentracing.Span { return defaultNoopSpan } func (n noopSpan) 
BaggageItem(key string) string { return emptyString } -func (n noopSpan) SetTag(key string, value interface{}) opentracing.Span { return n } +func (n noopSpan) SetTag(key string, value any) opentracing.Span { return n } func (n noopSpan) LogFields(fields ...log.Field) {} -func (n noopSpan) LogKV(keyVals ...interface{}) {} +func (n noopSpan) LogKV(keyVals ...any) {} func (n noopSpan) Finish() {} func (n noopSpan) FinishWithOptions(opts opentracing.FinishOptions) {} func (n noopSpan) SetOperationName(operationName string) opentracing.Span { return n } func (n noopSpan) Tracer() opentracing.Tracer { return defaultNoopTracer } func (n noopSpan) LogEvent(event string) {} -func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {} +func (n noopSpan) LogEventWithPayload(event string, payload any) {} func (n noopSpan) Log(data opentracing.LogData) {} // StartSpan belongs to the Tracer interface. @@ -42,11 +42,11 @@ func (n noopTracer) StartSpan(operationName string, opts ...opentracing.StartSpa } // Inject belongs to the Tracer interface. -func (n noopTracer) Inject(sp opentracing.SpanContext, format interface{}, carrier interface{}) error { +func (n noopTracer) Inject(sp opentracing.SpanContext, format any, carrier any) error { return nil } // Extract belongs to the Tracer interface. -func (n noopTracer) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) { +func (n noopTracer) Extract(format any, carrier any) (opentracing.SpanContext, error) { return nil, opentracing.ErrSpanContextNotFound } diff --git a/pkg/util/spanlogger/spanlogger.go b/pkg/util/spanlogger/spanlogger.go index a96f95726f..cde7ae045a 100644 --- a/pkg/util/spanlogger/spanlogger.go +++ b/pkg/util/spanlogger/spanlogger.go @@ -30,14 +30,14 @@ type SpanLogger struct { } // New makes a new SpanLogger, where logs will be sent to the global logger. -func New(ctx context.Context, method string, kvps ...interface{}) (*SpanLogger, context.Context) { +func New(ctx context.Context, method string, kvps ...any) (*SpanLogger, context.Context) { return NewWithLogger(ctx, util_log.Logger, method, kvps...) } // NewWithLogger makes a new SpanLogger with a custom log.Logger to send logs // to. The provided context will have the logger attached to it and can be // retrieved with FromContext or FromContextWithFallback. -func NewWithLogger(ctx context.Context, l log.Logger, method string, kvps ...interface{}) (*SpanLogger, context.Context) { +func NewWithLogger(ctx context.Context, l log.Logger, method string, kvps ...any) (*SpanLogger, context.Context) { span, ctx := opentracing.StartSpanFromContext(ctx, method) if ids, _ := tenant.TenantIDs(ctx); len(ids) > 0 { span.SetTag(TenantIDTagName, ids) @@ -83,7 +83,7 @@ func FromContextWithFallback(ctx context.Context, fallback log.Logger) *SpanLogg // Log implements gokit's Logger interface; sends logs to underlying logger and // also puts them on the spans. -func (s *SpanLogger) Log(kvps ...interface{}) error { +func (s *SpanLogger) Log(kvps ...any) error { s.Logger.Log(kvps...) fields, err := otlog.InterleavedKVToFields(kvps...)
if err != nil { diff --git a/pkg/util/spanlogger/spanlogger_test.go b/pkg/util/spanlogger/spanlogger_test.go index 86bc10e252..f522fa6f9f 100644 --- a/pkg/util/spanlogger/spanlogger_test.go +++ b/pkg/util/spanlogger/spanlogger_test.go @@ -25,8 +25,8 @@ func TestSpanLogger_Log(t *testing.T) { } func TestSpanLogger_CustomLogger(t *testing.T) { - var logged [][]interface{} - var logger funcLogger = func(keyvals ...interface{}) error { + var logged [][]any + var logger funcLogger = func(keyvals ...any) error { logged = append(logged, keyvals) return nil } @@ -39,7 +39,7 @@ func TestSpanLogger_CustomLogger(t *testing.T) { span = FromContextWithFallback(context.Background(), logger) _ = span.Log("msg", "fallback spanlogger") - expect := [][]interface{}{ + expect := [][]any{ {"method", "test", "msg", "original spanlogger"}, {"msg", "restored spanlogger"}, {"msg", "fallback spanlogger"}, @@ -68,8 +68,8 @@ func createSpan(ctx context.Context) *mocktracer.MockSpan { return logger.Span.(*mocktracer.MockSpan) } -type funcLogger func(keyvals ...interface{}) error +type funcLogger func(keyvals ...any) error -func (f funcLogger) Log(keyvals ...interface{}) error { +func (f funcLogger) Log(keyvals ...any) error { return f(keyvals...) } diff --git a/pkg/util/strings.go b/pkg/util/strings.go index 4965dc52a5..4fdaded30c 100644 --- a/pkg/util/strings.go +++ b/pkg/util/strings.go @@ -17,17 +17,6 @@ const ( internerLruCacheTTL = time.Hour * 2 ) -// StringsContain returns true if the search value is within the list of input values. -func StringsContain(values []string, search string) bool { - for _, v := range values { - if search == v { - return true - } - } - - return false -} - // StringsMap returns a map where keys are input values. func StringsMap(values []string) map[string]bool { out := make(map[string]bool, len(values)) diff --git a/pkg/util/strings_test.go b/pkg/util/strings_test.go index de4cc28092..ddc8df1f1a 100644 --- a/pkg/util/strings_test.go +++ b/pkg/util/strings_test.go @@ -96,10 +96,9 @@ func BenchmarkMergeSlicesParallel(b *testing.B) { b.Run(name, func(b *testing.B) { // Run the benchmark. b.ReportAllocs() - b.ResetTimer() var r []string var err error - for i := 0; i < b.N; i++ { + for b.Loop() { if p == usingMap { r = sortUsingMap(input...) require.NotEmpty(b, r) diff --git a/pkg/util/test/poll.go b/pkg/util/test/poll.go index b88e073a86..8759d56d2b 100644 --- a/pkg/util/test/poll.go +++ b/pkg/util/test/poll.go @@ -7,7 +7,7 @@ import ( ) // Poll repeatedly evaluates condition until we either timeout, or it succeeds. 
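Reviewer note before the Poll signature change that follows: Poll is the busy-wait assertion helper that the test hunks in this diff keep switching over to func() any. Below is a minimal re-implementation consistent with the visible signature and deadline loop; the 100ms retry interval and DeepEqual comparison are assumptions, not necessarily what the real helper uses.

```go
package test

import (
	"reflect"
	"testing"
	"time"
)

// pollSketch re-evaluates have() until it equals want or the deadline
// d expires, failing the test on timeout.
func pollSketch(t testing.TB, d time.Duration, want any, have func() any) {
	t.Helper()
	deadline := time.Now().Add(d)
	for !time.Now().After(deadline) {
		if reflect.DeepEqual(want, have()) {
			return
		}
		time.Sleep(100 * time.Millisecond) // assumed retry interval
	}
	t.Fatalf("poll timed out: want %v, last have %v", want, have())
}
```

Call sites then read as in the time_test.go hunk below, e.g. test.Poll(t, 5*time.Second, true, func() any { ... }).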
-func Poll(t testing.TB, d time.Duration, want interface{}, have func() interface{}) { +func Poll(t testing.TB, d time.Duration, want any, have func() any) { t.Helper() deadline := time.Now().Add(d) for !time.Now().After(deadline) { diff --git a/pkg/util/test_util.go b/pkg/util/test_util.go index 521a921e1c..193e7dd9d4 100644 --- a/pkg/util/test_util.go +++ b/pkg/util/test_util.go @@ -19,10 +19,10 @@ func GenerateRandomStrings() []string { randomChar := "0123456789abcdef" randomStrings := make([]string, 0, 1000000) sb := strings.Builder{} - for i := 0; i < 1000000; i++ { + for range 1000000 { sb.Reset() sb.WriteString("pod://") - for j := 0; j < 14; j++ { + for range 14 { sb.WriteByte(randomChar[rand.Int()%len(randomChar)]) } randomStrings = append(randomStrings, sb.String()) @@ -50,20 +50,20 @@ func GenerateChunk(t require.TestingT, step time.Duration, from model.Time, poin switch pe { case chunkenc.EncXOR: - for i := 0; i < points; i++ { + for range points { appender.Append(int64(ts), float64(ts)) ts = ts.Add(step) } case chunkenc.EncHistogram: histograms := histogram_util.GenerateTestHistograms(int(from), int(step/time.Millisecond), points) - for i := 0; i < points; i++ { + for i := range points { _, _, appender, err = appender.AppendHistogram(nil, int64(ts), histograms[i], true) require.NoError(t, err) ts = ts.Add(step) } case chunkenc.EncFloatHistogram: histograms := histogram_util.GenerateTestHistograms(int(from), int(step/time.Millisecond), points) - for i := 0; i < points; i++ { + for i := range points { _, _, appender, err = appender.AppendFloatHistogram(nil, int64(ts), histograms[i].ToFloat(nil), true) require.NoError(t, err) ts = ts.Add(step) diff --git a/pkg/util/time_test.go b/pkg/util/time_test.go index 6bdeb23193..3696cbace0 100644 --- a/pkg/util/time_test.go +++ b/pkg/util/time_test.go @@ -36,7 +36,7 @@ func TestTimeFromMillis(t *testing.T) { func TestDurationWithJitter(t *testing.T) { const numRuns = 1000 - for i := 0; i < numRuns; i++ { + for range numRuns { actual := DurationWithJitter(time.Minute, 0.5) assert.GreaterOrEqual(t, int64(actual), int64(30*time.Second)) assert.LessOrEqual(t, int64(actual), int64(90*time.Second)) @@ -50,7 +50,7 @@ func TestDurationWithJitter_ZeroInputDuration(t *testing.T) { func TestDurationWithPositiveJitter(t *testing.T) { const numRuns = 1000 - for i := 0; i < numRuns; i++ { + for range numRuns { actual := DurationWithPositiveJitter(time.Minute, 0.5) assert.GreaterOrEqual(t, int64(actual), int64(60*time.Second)) assert.LessOrEqual(t, int64(actual), int64(90*time.Second)) @@ -230,7 +230,7 @@ func TestSlottedTicker(t *testing.T) { slotSize := tc.duration.Milliseconds() / int64(tc.totalSlots) successCount := 0 - test.Poll(t, 5*time.Second, true, func() interface{} { + test.Poll(t, 5*time.Second, true, func() any { tTime := <-ticker.C slotShiftInMs := tTime.UnixMilli() % tc.duration.Milliseconds() slot := slotShiftInMs / slotSize @@ -255,13 +255,13 @@ func TestSlottedTicker(t *testing.T) { ticker := NewSlottedTicker(infoFunc, d, 0) - test.Poll(t, 5*time.Second, true, func() interface{} { + test.Poll(t, 5*time.Second, true, func() any { tTime := <-ticker.C slotShiftInMs := tTime.UnixMilli() % d.Milliseconds() return slotShiftInMs >= 60 && slotShiftInMs <= 90 }) slotSize.Store(5) - test.Poll(t, 2*time.Second, true, func() interface{} { + test.Poll(t, 2*time.Second, true, func() any { tTime := <-ticker.C slotShiftInMs := tTime.UnixMilli() % d.Milliseconds() return slotShiftInMs >= 120 && slotShiftInMs <= 180 diff --git 
a/pkg/util/tls/test/tls_integration_test.go b/pkg/util/tls/test/tls_integration_test.go index ce3bcb4cb9..d37e57f2c6 100644 --- a/pkg/util/tls/test/tls_integration_test.go +++ b/pkg/util/tls/test/tls_integration_test.go @@ -39,19 +39,6 @@ type grpcHealthCheck struct { healthy bool } -func (h *grpcHealthCheck) List(ctx context.Context, request *grpc_health_v1.HealthListRequest) (*grpc_health_v1.HealthListResponse, error) { - checkResp, err := h.Check(ctx, nil) - if err != nil { - return &grpc_health_v1.HealthListResponse{}, err - } - - return &grpc_health_v1.HealthListResponse{ - Statuses: map[string]*grpc_health_v1.HealthCheckResponse{ - "server": checkResp, - }, - }, nil -} - func (h *grpcHealthCheck) Check(_ context.Context, _ *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) { if !h.healthy { return &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_NOT_SERVING}, nil diff --git a/pkg/util/validation/exporter.go b/pkg/util/validation/exporter.go index a0a4d07193..69f258113f 100644 --- a/pkg/util/validation/exporter.go +++ b/pkg/util/validation/exporter.go @@ -1,6 +1,10 @@ package validation import ( + "reflect" + "strings" + "time" + "github.com/prometheus/client_golang/prometheus" ) @@ -31,12 +35,56 @@ func (oe *OverridesExporter) Describe(ch chan<- *prometheus.Desc) { func (oe *OverridesExporter) Collect(ch chan<- prometheus.Metric) { allLimits := oe.tenantLimits.AllByUserID() for tenant, limits := range allLimits { - ch <- prometheus.MustNewConstMetric(oe.description, prometheus.GaugeValue, limits.IngestionRate, "ingestion_rate", tenant) - ch <- prometheus.MustNewConstMetric(oe.description, prometheus.GaugeValue, float64(limits.IngestionBurstSize), "ingestion_burst_size", tenant) + for metricName, value := range ExtractNumericalValues(limits) { + ch <- prometheus.MustNewConstMetric(oe.description, prometheus.GaugeValue, value, metricName, tenant) + } + } +} + +func ExtractNumericalValues(l *Limits) map[string]float64 { + metrics := make(map[string]float64) + + v := reflect.ValueOf(l).Elem() + t := v.Type() + + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + fieldType := t.Field(i) + + tag := fieldType.Tag.Get("yaml") + if tag == "" || tag == "-" { + // skip fields with no yaml tag, or with tag "-" + continue + } + + // remove options like omitempty + if idx := strings.Index(tag, ","); idx != -1 { + tag = tag[:idx] + } - ch <- prometheus.MustNewConstMetric(oe.description, prometheus.GaugeValue, float64(limits.MaxLocalSeriesPerUser), "max_local_series_per_user", tenant) - ch <- prometheus.MustNewConstMetric(oe.description, prometheus.GaugeValue, float64(limits.MaxLocalSeriesPerMetric), "max_local_series_per_metric", tenant) - ch <- prometheus.MustNewConstMetric(oe.description, prometheus.GaugeValue, float64(limits.MaxGlobalSeriesPerUser), "max_global_series_per_user", tenant) - ch <- prometheus.MustNewConstMetric(oe.description, prometheus.GaugeValue, float64(limits.MaxGlobalSeriesPerMetric), "max_global_series_per_metric", tenant) + switch field.Kind() { + case reflect.Int, reflect.Int64: + if field.Type().String() == "model.Duration" { + // we export the model.Duration in seconds + metrics[tag] = time.Duration(field.Int()).Seconds() + } else { + metrics[tag] = float64(field.Int()) + } + case reflect.Uint, reflect.Uint64: + metrics[tag] = float64(field.Uint()) + case reflect.Float64: + metrics[tag] = field.Float() + case reflect.Bool: + if field.Bool() { + // true as 1.0 + metrics[tag] = 1.0 + } else { + // false as 0.0 +
metrics[tag] = 0.0 + } + case reflect.String, reflect.Slice, reflect.Map, reflect.Struct: + continue + } } + return metrics } diff --git a/pkg/util/validation/exporter_test.go b/pkg/util/validation/exporter_test.go index 44d503a80d..3c2ca56e59 100644 --- a/pkg/util/validation/exporter_test.go +++ b/pkg/util/validation/exporter_test.go @@ -1,10 +1,14 @@ package validation import ( + "flag" + "strings" "testing" + "time" "github.com/prometheus/client_golang/prometheus/testutil" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestOverridesExporter_noConfig(t *testing.T) { @@ -21,10 +25,164 @@ func TestOverridesExporter_withConfig(t *testing.T) { MaxQueriersPerTenant: 5, }, } + fs := flag.NewFlagSet("test", flag.ContinueOnError) + tenantLimits["tenant-a"].RegisterFlags(fs) exporter := NewOverridesExporter(newMockTenantLimits(tenantLimits)) // There should be at least a few metrics generated by receiving an override configuration map count := testutil.CollectAndCount(exporter, "cortex_overrides") assert.Greater(t, count, 0) + require.NoError(t, testutil.CollectAndCompare(exporter, strings.NewReader(` + # HELP cortex_overrides Resource limit overrides applied to tenants + # TYPE cortex_overrides gauge + cortex_overrides{limit_name="accept_ha_samples",user="tenant-a"} 0 + cortex_overrides{limit_name="accept_mixed_ha_samples",user="tenant-a"} 0 + cortex_overrides{limit_name="alertmanager_max_alerts_count",user="tenant-a"} 0 + cortex_overrides{limit_name="alertmanager_max_alerts_size_bytes",user="tenant-a"} 0 + cortex_overrides{limit_name="alertmanager_max_config_size_bytes",user="tenant-a"} 0 + cortex_overrides{limit_name="alertmanager_max_dispatcher_aggregation_groups",user="tenant-a"} 0 + cortex_overrides{limit_name="alertmanager_max_silences_count",user="tenant-a"} 0 + cortex_overrides{limit_name="alertmanager_max_silences_size_bytes",user="tenant-a"} 0 + cortex_overrides{limit_name="alertmanager_max_template_size_bytes",user="tenant-a"} 0 + cortex_overrides{limit_name="alertmanager_max_templates_count",user="tenant-a"} 0 + cortex_overrides{limit_name="alertmanager_notification_rate_limit",user="tenant-a"} 0 + cortex_overrides{limit_name="alertmanager_receivers_firewall_block_private_addresses",user="tenant-a"} 0 + cortex_overrides{limit_name="compactor_blocks_retention_period",user="tenant-a"} 0 + cortex_overrides{limit_name="compactor_partition_index_size_bytes",user="tenant-a"} 6.8719476736e+10 + cortex_overrides{limit_name="compactor_partition_series_count",user="tenant-a"} 0 + cortex_overrides{limit_name="compactor_tenant_shard_size",user="tenant-a"} 0 + cortex_overrides{limit_name="creation_grace_period",user="tenant-a"} 600 + cortex_overrides{limit_name="enable_native_histograms",user="tenant-a"} 0 + cortex_overrides{limit_name="enforce_metadata_metric_name",user="tenant-a"} 1 + cortex_overrides{limit_name="enforce_metric_name",user="tenant-a"} 1 + cortex_overrides{limit_name="ha_max_clusters",user="tenant-a"} 0 + cortex_overrides{limit_name="ingestion_burst_size",user="tenant-a"} 50000 + cortex_overrides{limit_name="ingestion_rate",user="tenant-a"} 25000 + cortex_overrides{limit_name="ingestion_tenant_shard_size",user="tenant-a"} 0 + cortex_overrides{limit_name="max_cache_freshness",user="tenant-a"} 60 + cortex_overrides{limit_name="max_downloaded_bytes_per_request",user="tenant-a"} 0 + cortex_overrides{limit_name="max_exemplars",user="tenant-a"} 0 + cortex_overrides{limit_name="max_fetched_chunk_bytes_per_query",user="tenant-a"} 0 + 
cortex_overrides{limit_name="max_fetched_chunks_per_query",user="tenant-a"} 2e+06 + cortex_overrides{limit_name="max_fetched_data_bytes_per_query",user="tenant-a"} 0 + cortex_overrides{limit_name="max_fetched_series_per_query",user="tenant-a"} 0 + cortex_overrides{limit_name="max_global_metadata_per_metric",user="tenant-a"} 0 + cortex_overrides{limit_name="max_global_metadata_per_user",user="tenant-a"} 0 + cortex_overrides{limit_name="max_global_native_histogram_series_per_user",user="tenant-a"} 0 + cortex_overrides{limit_name="max_global_series_per_metric",user="tenant-a"} 0 + cortex_overrides{limit_name="max_global_series_per_user",user="tenant-a"} 0 + cortex_overrides{limit_name="max_label_name_length",user="tenant-a"} 1024 + cortex_overrides{limit_name="max_label_names_per_series",user="tenant-a"} 30 + cortex_overrides{limit_name="max_label_value_length",user="tenant-a"} 2048 + cortex_overrides{limit_name="max_labels_size_bytes",user="tenant-a"} 0 + cortex_overrides{limit_name="max_metadata_length",user="tenant-a"} 1024 + cortex_overrides{limit_name="max_metadata_per_metric",user="tenant-a"} 10 + cortex_overrides{limit_name="max_metadata_per_user",user="tenant-a"} 8000 + cortex_overrides{limit_name="max_native_histogram_buckets",user="tenant-a"} 0 + cortex_overrides{limit_name="max_native_histogram_sample_size_bytes",user="tenant-a"} 0 + cortex_overrides{limit_name="max_native_histogram_series_per_user",user="tenant-a"} 0 + cortex_overrides{limit_name="max_outstanding_requests_per_tenant",user="tenant-a"} 100 + cortex_overrides{limit_name="max_queriers_per_tenant",user="tenant-a"} 0 + cortex_overrides{limit_name="max_query_length",user="tenant-a"} 0 + cortex_overrides{limit_name="max_query_lookback",user="tenant-a"} 0 + cortex_overrides{limit_name="max_query_parallelism",user="tenant-a"} 14 + cortex_overrides{limit_name="max_query_response_size",user="tenant-a"} 0 + cortex_overrides{limit_name="max_series_per_metric",user="tenant-a"} 50000 + cortex_overrides{limit_name="max_series_per_user",user="tenant-a"} 5e+06 + cortex_overrides{limit_name="native_histogram_ingestion_burst_size",user="tenant-a"} 0 + cortex_overrides{limit_name="native_histogram_ingestion_rate",user="tenant-a"} 1.7976931348623157e+308 + cortex_overrides{limit_name="out_of_order_time_window",user="tenant-a"} 0 + cortex_overrides{limit_name="parquet_converter_enabled",user="tenant-a"} 0 + cortex_overrides{limit_name="parquet_converter_tenant_shard_size",user="tenant-a"} 0 + cortex_overrides{limit_name="parquet_max_fetched_chunk_bytes",user="tenant-a"} 0 + cortex_overrides{limit_name="parquet_max_fetched_data_bytes",user="tenant-a"} 0 + cortex_overrides{limit_name="parquet_max_fetched_row_count",user="tenant-a"} 0 + cortex_overrides{limit_name="query_partial_data",user="tenant-a"} 0 + cortex_overrides{limit_name="query_vertical_shard_size",user="tenant-a"} 0 + cortex_overrides{limit_name="reject_old_samples",user="tenant-a"} 0 + cortex_overrides{limit_name="reject_old_samples_max_age",user="tenant-a"} 1.2096e+06 + cortex_overrides{limit_name="ruler_evaluation_delay_duration",user="tenant-a"} 0 + cortex_overrides{limit_name="ruler_max_rule_groups_per_tenant",user="tenant-a"} 0 + cortex_overrides{limit_name="ruler_max_rules_per_rule_group",user="tenant-a"} 0 + cortex_overrides{limit_name="ruler_query_offset",user="tenant-a"} 0 + cortex_overrides{limit_name="ruler_tenant_shard_size",user="tenant-a"} 0 + cortex_overrides{limit_name="rules_partial_data",user="tenant-a"} 0 + 
cortex_overrides{limit_name="store_gateway_tenant_shard_size",user="tenant-a"} 0 + `), "cortex_overrides")) +} + +func TestExtractNumericalValues(t *testing.T) { + limits := &Limits{} + fs := flag.NewFlagSet("test", flag.ContinueOnError) + limits.RegisterFlags(fs) + extracted := ExtractNumericalValues(limits) + t.Run("float64 should be converted", func(t *testing.T) { + require.Equal(t, limits.IngestionRate, extracted["ingestion_rate"]) + }) + t.Run("int should be converted", func(t *testing.T) { + require.Equal(t, float64(limits.IngestionBurstSize), extracted["ingestion_burst_size"]) + }) + t.Run("int64 should be converted", func(t *testing.T) { + require.Equal(t, float64(limits.MaxQueryResponseSize), extracted["max_query_response_size"]) + }) + t.Run("string shouldn't be converted", func(t *testing.T) { + _, ok := extracted["ingestion_rate_strategy"] + require.False(t, ok, "string should be not converted") + }) + t.Run("bool should be converted, default value false converted to 0", func(t *testing.T) { + val, ok := extracted["accept_ha_samples"] + require.True(t, ok) + require.Equal(t, 0.0, val) + }) + t.Run("bool should be converted, default value true converted to 1", func(t *testing.T) { + val, ok := extracted["enforce_metric_name"] + require.True(t, ok) + require.Equal(t, 1.0, val) + }) + t.Run("flagext.StringSlice shouldn't be converted", func(t *testing.T) { + _, ok := extracted["drop_labels"] + require.False(t, ok) + }) + t.Run("model.Duration should be converted", func(t *testing.T) { + val, ok := extracted["reject_old_samples_max_age"] + require.True(t, ok) + require.Equal(t, time.Duration(limits.RejectOldSamplesMaxAge).Seconds(), val) + }) + t.Run("[]*relabel.Config shouldn't be converted", func(t *testing.T) { + _, ok := extracted["metric_relabel_configs"] + require.False(t, ok) + }) + t.Run("[]string shouldn't be converted", func(t *testing.T) { + _, ok := extracted["promote_resource_attributes"] + require.False(t, ok) + }) + t.Run("[]LimitsPerLabelSet shouldn't be converted", func(t *testing.T) { + _, ok := extracted["limits_per_label_set"] + require.False(t, ok) + }) + t.Run("QueryPriority shouldn't be converted", func(t *testing.T) { + _, ok := extracted["query_priority"] + require.False(t, ok) + }) + t.Run("QueryRejection shouldn't be converted", func(t *testing.T) { + _, ok := extracted["query_rejection"] + require.False(t, ok) + }) + t.Run("labels.Labels shouldn't be converted", func(t *testing.T) { + _, ok := extracted["ruler_external_labels"] + require.False(t, ok) + }) + t.Run("flagext.CIDRSliceCSV shouldn't be converted", func(t *testing.T) { + _, ok := extracted["alertmanager_receivers_firewall_block_cidr_networks"] + require.False(t, ok) + }) + t.Run("NotificationRateLimitMap shouldn't be converted", func(t *testing.T) { + _, ok := extracted["alertmanager_notification_rate_limit_per_integration"] + require.False(t, ok) + }) + t.Run("DisabledRuleGroups shouldn't be converted", func(t *testing.T) { + _, ok := extracted["disabled_rule_groups"] + require.False(t, ok) + }) } diff --git a/pkg/util/validation/limits.go b/pkg/util/validation/limits.go index fcd96fea36..c0611bff90 100644 --- a/pkg/util/validation/limits.go +++ b/pkg/util/validation/limits.go @@ -6,6 +6,7 @@ import ( "errors" "flag" "fmt" + "maps" "math" "regexp" "strings" @@ -184,13 +185,13 @@ type Limits struct { MaxQueryResponseSize int64 `yaml:"max_query_response_size" json:"max_query_response_size"` MaxCacheFreshness model.Duration `yaml:"max_cache_freshness" json:"max_cache_freshness"` 
MaxQueriersPerTenant float64 `yaml:"max_queriers_per_tenant" json:"max_queriers_per_tenant"` - QueryVerticalShardSize int `yaml:"query_vertical_shard_size" json:"query_vertical_shard_size" doc:"hidden"` + QueryVerticalShardSize int `yaml:"query_vertical_shard_size" json:"query_vertical_shard_size"` QueryPartialData bool `yaml:"query_partial_data" json:"query_partial_data" doc:"nocli|description=Enable to allow queries to be evaluated with data from a single zone, if other zones are not available.|default=false"` // Parquet Queryable enforced limits. - ParquetMaxFetchedRowCount int `yaml:"parquet_max_fetched_row_count" json:"parquet_max_fetched_row_count" doc:"hidden"` - ParquetMaxFetchedChunkBytes int `yaml:"parquet_max_fetched_chunk_bytes" json:"parquet_max_fetched_chunk_bytes" doc:"hidden"` - ParquetMaxFetchedDataBytes int `yaml:"parquet_max_fetched_data_bytes" json:"parquet_max_fetched_data_bytes" doc:"hidden"` + ParquetMaxFetchedRowCount int `yaml:"parquet_max_fetched_row_count" json:"parquet_max_fetched_row_count"` + ParquetMaxFetchedChunkBytes int `yaml:"parquet_max_fetched_chunk_bytes" json:"parquet_max_fetched_chunk_bytes"` + ParquetMaxFetchedDataBytes int `yaml:"parquet_max_fetched_data_bytes" json:"parquet_max_fetched_data_bytes"` // Query Frontend / Scheduler enforced limits. MaxOutstandingPerTenant int `yaml:"max_outstanding_requests_per_tenant" json:"max_outstanding_requests_per_tenant"` @@ -219,9 +220,9 @@ type Limits struct { CompactorPartitionSeriesCount int64 `yaml:"compactor_partition_series_count" json:"compactor_partition_series_count"` // Parquet converter - ParquetConverterEnabled bool `yaml:"parquet_converter_enabled" json:"parquet_converter_enabled" doc:"hidden"` - ParquetConverterTenantShardSize float64 `yaml:"parquet_converter_tenant_shard_size" json:"parquet_converter_tenant_shard_size" doc:"hidden"` - + ParquetConverterEnabled bool `yaml:"parquet_converter_enabled" json:"parquet_converter_enabled"` + ParquetConverterTenantShardSize float64 `yaml:"parquet_converter_tenant_shard_size" json:"parquet_converter_tenant_shard_size"` + ParquetConverterSortColumns []string `yaml:"parquet_converter_sort_columns" json:"parquet_converter_sort_columns"` // This config doesn't have a CLI flag registered here because they're registered in // their own original config struct. S3SSEType string `yaml:"s3_sse_type" json:"s3_sse_type" doc:"nocli|description=S3 server-side encryption type. Required to enable server-side encryption overrides for a specific tenant. If not set, the default S3 client settings are used."` @@ -324,6 +325,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.Float64Var(&l.ParquetConverterTenantShardSize, "parquet-converter.tenant-shard-size", 0, "The default tenant's shard size when the shuffle-sharding strategy is used by the parquet converter. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant. If the value is < 1 and > 0 the shard size will be a percentage of the total parquet converters.") f.BoolVar(&l.ParquetConverterEnabled, "parquet-converter.enabled", false, "If set, enables the Parquet converter to create the parquet files.") + f.Var((*flagext.StringSlice)(&l.ParquetConverterSortColumns), "parquet-converter.sort-columns", "Additional label names for specific tenants to sort by after metric name, in order of precedence. These are applied during Parquet file generation.") // Parquet Queryable enforced limits. 
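Reviewer note, stepping back to the exporter change earlier in this diff: ExtractNumericalValues walks the Limits struct via reflection and keys each numeric field by its yaml tag (with options such as ,omitempty trimmed), which is what produces the limit_name values asserted in exporter_test.go. Here is a self-contained sketch of that tag-driven extraction, using a toy struct and stdlib time.Duration in place of the real Limits and model.Duration, and omitting the uint/bool cases for brevity.

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
	"time"
)

// toyLimits stands in for validation.Limits; only the yaml tags matter here.
type toyLimits struct {
	IngestionRate float64       `yaml:"ingestion_rate"`
	BurstSize     int           `yaml:"ingestion_burst_size,omitempty"`
	MaxAge        time.Duration `yaml:"reject_old_samples_max_age"` // exported in seconds
	Strategy      string        `yaml:"ingestion_rate_strategy"`    // non-numeric: skipped
}

// extract maps each numeric field to its yaml tag, like the diff's
// ExtractNumericalValues (simplified).
func extract(l *toyLimits) map[string]float64 {
	out := make(map[string]float64)
	v := reflect.ValueOf(l).Elem()
	for i := 0; i < v.NumField(); i++ {
		tag := v.Type().Field(i).Tag.Get("yaml")
		if tag == "" || tag == "-" {
			continue
		}
		if idx := strings.Index(tag, ","); idx != -1 {
			tag = tag[:idx] // drop options like omitempty
		}
		f := v.Field(i)
		switch f.Kind() {
		case reflect.Float64:
			out[tag] = f.Float()
		case reflect.Int, reflect.Int64:
			if f.Type() == reflect.TypeOf(time.Duration(0)) {
				out[tag] = time.Duration(f.Int()).Seconds() // durations in seconds
			} else {
				out[tag] = float64(f.Int())
			}
		}
	}
	return out
}

func main() {
	fmt.Println(extract(&toyLimits{IngestionRate: 25000, BurstSize: 50000, MaxAge: 14 * 24 * time.Hour}))
	// map[ingestion_burst_size:50000 ingestion_rate:25000 reject_old_samples_max_age:1.2096e+06]
}
```

This is also why reject_old_samples_max_age shows up as 1.2096e+06 (14 days in seconds) in the expected output of TestOverridesExporter_withConfig.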
f.IntVar(&l.ParquetMaxFetchedRowCount, "querier.parquet-queryable.max-fetched-row-count", 0, "The maximum number of rows that can be fetched when querying parquet storage. Each row maps to a series in a parquet file. This limit applies before materializing chunks. 0 to disable.") @@ -390,7 +392,7 @@ func (l *Limits) Validate(shardByAllLabels bool, activeSeriesMetricsEnabled bool } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (l *Limits) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (l *Limits) UnmarshalYAML(unmarshal func(any) error) error { // We want to set l to the defaults and then overwrite it with the input. // To make unmarshal fill the plain data struct rather than calling UnmarshalYAML // again, we have to hide it using a type indirection. See prometheus/config. @@ -465,9 +467,7 @@ func (l *Limits) calculateMaxSeriesPerLabelSetId() error { func (l *Limits) copyNotificationIntegrationLimits(defaults NotificationRateLimitMap) { l.NotificationRateLimitPerIntegration = make(map[string]float64, len(defaults)) - for k, v := range defaults { - l.NotificationRateLimitPerIntegration[k] = v - } + maps.Copy(l.NotificationRateLimitPerIntegration, defaults) } func (l *Limits) hasQueryAttributeRegexChanged() bool { @@ -904,6 +904,11 @@ func (o *Overrides) ParquetConverterEnabled(userID string) bool { return o.GetOverridesForUser(userID).ParquetConverterEnabled } +// ParquetConverterSortColumns returns the additional sort columns for parquet files. +func (o *Overrides) ParquetConverterSortColumns(userID string) []string { + return o.GetOverridesForUser(userID).ParquetConverterSortColumns +} + // ParquetMaxFetchedRowCount returns the maximum number of rows that can be fetched when querying parquet storage. func (o *Overrides) ParquetMaxFetchedRowCount(userID string) int { return o.GetOverridesForUser(userID).ParquetMaxFetchedRowCount @@ -1202,11 +1207,16 @@ outer: defaultPartitionIndex = i continue } - for _, lbl := range lbls.LabelSet { + found := true + lbls.LabelSet.Range(func(l labels.Label) { // We did not find some of the labels on the set - if v := metric.Get(lbl.Name); v != lbl.Value { - continue outer + if v := metric.Get(l.Name); v != l.Value { + found = false } + }) + + if !found { + continue outer } r = append(r, lbls) } diff --git a/pkg/util/validation/limits_test.go b/pkg/util/validation/limits_test.go index 308067e959..7896c8ee95 100644 --- a/pkg/util/validation/limits_test.go +++ b/pkg/util/validation/limits_test.go @@ -116,17 +116,16 @@ func TestLimits_Validate(t *testing.T) { expected: errMaxLocalNativeHistogramSeriesPerUserValidation, }, "external-labels invalid label name": { - limits: Limits{RulerExternalLabels: labels.Labels{{Name: "123invalid", Value: "good"}}}, + limits: Limits{RulerExternalLabels: labels.FromStrings("123invalid", "good")}, expected: errInvalidLabelName, }, "external-labels invalid label value": { - limits: Limits{RulerExternalLabels: labels.Labels{{Name: "good", Value: string([]byte{0xff, 0xfe, 0xfd})}}}, + limits: Limits{RulerExternalLabels: labels.FromStrings("good", string([]byte{0xff, 0xfe, 0xfd}))}, expected: errInvalidLabelValue, }, } for testName, testData := range tests { - testData := testData t.Run(testName, func(t *testing.T) { assert.ErrorIs(t, testData.limits.Validate(testData.shardByAllLabels, testData.activeSeriesMetricsEnabled), testData.expected) @@ -217,7 +216,7 @@ func TestLimitsTagsYamlMatchJson(t *testing.T) { n := limits.NumField() var mismatch []string - for i := 0; i < n; i++ { + for i := range 
n { field := limits.Field(i) // Note that we aren't requiring YAML and JSON tags to match, just that @@ -288,7 +287,7 @@ func TestLimitsAlwaysUsesPromDuration(t *testing.T) { n := limits.NumField() var badDurationType []string - for i := 0; i < n; i++ { + for i := range n { field := limits.Field(i) if field.Type == stdlibDuration { badDurationType = append(badDurationType, field.Name) diff --git a/pkg/util/validation/notifications_limit_flag.go b/pkg/util/validation/notifications_limit_flag.go index 403980cd04..d06c7e6ff6 100644 --- a/pkg/util/validation/notifications_limit_flag.go +++ b/pkg/util/validation/notifications_limit_flag.go @@ -3,10 +3,9 @@ package validation import ( "encoding/json" "fmt" + "slices" "github.com/pkg/errors" - - "github.com/cortexproject/cortex/pkg/util" ) var allowedIntegrationNames = []string{ @@ -32,7 +31,7 @@ func (m NotificationRateLimitMap) Set(s string) error { } // UnmarshalYAML implements yaml.Unmarshaler. -func (m NotificationRateLimitMap) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (m NotificationRateLimitMap) UnmarshalYAML(unmarshal func(any) error) error { newMap := map[string]float64{} return m.updateMap(unmarshal(newMap), newMap) } @@ -43,7 +42,7 @@ func (m NotificationRateLimitMap) updateMap(unmarshalErr error, newMap map[strin } for k, v := range newMap { - if !util.StringsContain(allowedIntegrationNames, k) { + if !slices.Contains(allowedIntegrationNames, k) { return errors.Errorf("unknown integration name: %s", k) } m[k] = v @@ -52,6 +51,6 @@ func (m NotificationRateLimitMap) updateMap(unmarshalErr error, newMap map[strin } // MarshalYAML implements yaml.Marshaler. -func (m NotificationRateLimitMap) MarshalYAML() (interface{}, error) { +func (m NotificationRateLimitMap) MarshalYAML() (any, error) { return map[string]float64(m), nil } diff --git a/pkg/util/validation/validate.go b/pkg/util/validation/validate.go index 7f6b09c231..4dbb0b1714 100644 --- a/pkg/util/validation/validate.go +++ b/pkg/util/validation/validate.go @@ -17,6 +17,7 @@ import ( "github.com/cortexproject/cortex/pkg/cortexpb" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/discardedseries" "github.com/cortexproject/cortex/pkg/util/extract" "github.com/cortexproject/cortex/pkg/util/labelset" ) @@ -87,6 +88,11 @@ type ValidateMetrics struct { DiscardedSamplesPerLabelSet *prometheus.CounterVec LabelSetTracker *labelset.LabelSetTracker + + DiscardedSeries *prometheus.GaugeVec + DiscardedSeriesPerLabelset *prometheus.GaugeVec + DiscardedSeriesTracker *discardedseries.DiscardedSeriesTracker + DiscardedSeriesPerLabelsetTracker *discardedseries.DiscardedSeriesPerLabelsetTracker } func registerCollector(r prometheus.Registerer, c prometheus.Collector) { @@ -145,6 +151,22 @@ func NewValidateMetrics(r prometheus.Registerer) *ValidateMetrics { NativeHistogramMinResetDuration: 1 * time.Hour, }, []string{"user"}) registerCollector(r, labelSizeBytes) + discardedSeries := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "cortex_discarded_series", + Help: "The number of series that include discarded samples.", + }, + []string{discardReasonLabel, "user"}, + ) + registerCollector(r, discardedSeries) + discardedSeriesPerLabelset := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "cortex_discarded_series_per_labelset", + Help: "The number of series that include discarded samples for each labelset.", + }, + []string{discardReasonLabel, "user", "labelset"}, + ) + registerCollector(r, discardedSeriesPerLabelset) m := 
&ValidateMetrics{ DiscardedSamples: discardedSamples, @@ -154,7 +176,13 @@ func NewValidateMetrics(r prometheus.Registerer) *ValidateMetrics { HistogramSamplesReducedResolution: histogramSamplesReducedResolution, LabelSizeBytes: labelSizeBytes, LabelSetTracker: labelset.NewLabelSetTracker(), + DiscardedSeries: discardedSeries, + DiscardedSeriesPerLabelset: discardedSeriesPerLabelset, + DiscardedSeriesTracker: discardedseries.NewDiscardedSeriesTracker(discardedSeries), + DiscardedSeriesPerLabelsetTracker: discardedseries.NewDiscardedSeriesPerLabelsetTracker(discardedSeriesPerLabelset), } + m.DiscardedSeriesTracker.StartVendDiscardedSeriesMetricGoroutine() + m.DiscardedSeriesPerLabelsetTracker.StartVendDiscardedSeriesMetricGoroutine() return m } diff --git a/pkg/util/worker_pool.go b/pkg/util/worker_pool.go index 8ebaad60e2..d46d056084 100644 --- a/pkg/util/worker_pool.go +++ b/pkg/util/worker_pool.go @@ -51,7 +51,7 @@ func NewWorkerPool(name string, numWorkers int, reg prometheus.Registerer) Async }), } - for i := 0; i < numWorkers; i++ { + for range numWorkers { go wp.run() } @@ -74,7 +74,7 @@ func (s *workerPoolExecutor) Submit(f func()) { } func (s *workerPoolExecutor) run() { - for completed := 0; completed < serverWorkerResetThreshold; completed++ { + for range serverWorkerResetThreshold { f, ok := <-s.serverWorkerChannel if !ok { return diff --git a/pkg/util/worker_pool_test.go b/pkg/util/worker_pool_test.go index f6294f5a8a..037a9ef61a 100644 --- a/pkg/util/worker_pool_test.go +++ b/pkg/util/worker_pool_test.go @@ -61,7 +61,7 @@ func TestWorkerPool_ShouldFallbackWhenAllWorkersAreBusy(t *testing.T) { // Let's lock all submitted jobs m.Lock() - for i := 0; i < numberOfWorkers; i++ { + for range numberOfWorkers { workerPool.Submit(func() { defer blockerWg.Done() m.Lock() diff --git a/pkg/util/yaml.go b/pkg/util/yaml.go index bb8b4d802a..9286cfb403 100644 --- a/pkg/util/yaml.go +++ b/pkg/util/yaml.go @@ -4,13 +4,13 @@ import "gopkg.in/yaml.v2" // YAMLMarshalUnmarshal utility function that converts a YAML interface into a map // doing marshal and unmarshal of the parameter -func YAMLMarshalUnmarshal(in interface{}) (map[interface{}]interface{}, error) { +func YAMLMarshalUnmarshal(in any) (map[any]any, error) { yamlBytes, err := yaml.Marshal(in) if err != nil { return nil, err } - object := make(map[interface{}]interface{}) + object := make(map[any]any) if err := yaml.Unmarshal(yamlBytes, object); err != nil { return nil, err } diff --git a/schemas/cortex-config-schema.json b/schemas/cortex-config-schema.json new file mode 100644 index 0000000000..1ae848a9c4 --- /dev/null +++ b/schemas/cortex-config-schema.json @@ -0,0 +1,8873 @@ +{ + "$id": "https://raw.githubusercontent.com/cortexproject/cortex/master/schemas/cortex-config-schema.json", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "definitions": { + "DisabledRuleGroup": { + "properties": { + "name": { + "description": "name of the rule group", + "type": "string" + }, + "namespace": { + "description": "namespace in which the rule group belongs", + "type": "string" + } + }, + "type": "object" + }, + "Label": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + "LimitsPerLabelSet": { + "properties": { + "label_set": { + "additionalProperties": true, + "default": [], + "description": "LabelSet to which the limit should be applied. 
If no labels are provided, it becomes the default partition which matches any series that doesn't match any other explicitly defined label sets.'", + "type": "object" + }, + "limits": { + "properties": { + "max_series": { + "description": "The maximum number of active series per LabelSet, across the cluster before replication. Setting the value 0 will enable the monitoring (metrics) but would not enforce any limits.", + "type": "number" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "PriorityDef": { + "properties": { + "priority": { + "default": 0, + "description": "Priority level. Must be a unique value.", + "type": "number" + }, + "query_attributes": { + "default": [], + "description": "List of query_attributes to match and assign priority to queries. A query is assigned to this priority if it matches any query_attribute in this list. Each query_attribute has several properties (e.g., regex, time_window, user_agent), and all specified properties must match for a query_attribute to be considered a match. Only the specified properties are checked, and an AND operator is applied to them.", + "items": { + "type": "string" + }, + "type": "array" + }, + "reserved_queriers": { + "default": 0, + "description": "Number of reserved queriers to handle priorities higher or equal to the priority level. Value between 0 and 1 will be used as a percentage.", + "type": "number" + } + }, + "type": "object" + }, + "QueryAttribute": { + "properties": { + "api_type": { + "description": "API type for the query. Should be one of the query, query_range, series, labels, label_values. If not set, it won't be checked.", + "type": "string" + }, + "dashboard_uid": { + "description": "Grafana includes X-Dashboard-Uid header in query requests. If this field is provided then X-Dashboard-Uid header of request should match this value. If not set, it won't be checked. This property won't be applied to metadata queries.", + "type": "string" + }, + "panel_id": { + "description": "Grafana includes X-Panel-Id header in query requests. If this field is provided then X-Panel-Id header of request should match this value. If not set, it won't be checked. This property won't be applied to metadata queries.", + "type": "string" + }, + "query_step_limit": { + "description": "If query step provided should be within this limit to match. If not set, it won't be checked. This property only applied to range queries and ignored for other types of queries.", + "properties": { + "max": { + "default": 0, + "description": "Query step should be below or equal to this value to match. If set to 0, it won't be checked.", + "type": "number" + }, + "min": { + "default": 0, + "description": "Query step should be above or equal to this value to match. If set to 0, it won't be checked.", + "type": "number" + } + }, + "type": "object" + }, + "regex": { + "description": "Regex that the query string (or at least one of the matchers in metadata query) should match. If not set, it won't be checked.", + "type": "string" + }, + "time_range_limit": { + "description": "Query time range should be within this limit to match. Depending on where it was used, in most of the use-cases, either min or max value will be used. If not set, it won't be checked.", + "properties": { + "max": { + "default": 0, + "description": "This will be duration (12h, 1d, 15d etc.). Query time range should be below or equal to this value to match. 
Ex: if this value is 24h, then queries whose range is smaller than or equal to 24h will match. If set to 0, it won't be checked.", + "type": "number" + }, + "min": { + "default": 0, + "description": "This will be duration (12h, 1d, 15d etc.). Query time range should be above or equal to this value to match. Ex: if this value is 20d, then queries whose range is bigger than or equal to 20d will match. If set to 0, it won't be checked.", + "type": "number" + } + }, + "type": "object" + }, + "time_window": { + "description": "Overall data select time window (including range selectors, modifiers and lookback delta) that the query should be within. If not set, it won't be checked.", + "properties": { + "end": { + "default": 0, + "description": "End of the data select time window (including range selectors, modifiers and lookback delta) that the query should be within. If set to 0, it won't be checked.", + "type": "number" + }, + "start": { + "default": 0, + "description": "Start of the data select time window (including range selectors, modifiers and lookback delta) that the query should be within. If set to 0, it won't be checked.", + "type": "number" + } + }, + "type": "object" + }, + "user_agent_regex": { + "description": "Regex that User-Agent header of the request should match. If not set, it won't be checked.", + "type": "string" + } + }, + "type": "object" + }, + "alertmanager_config": { + "description": "The alertmanager_config configures the Cortex alertmanager.", + "properties": { + "alertmanager_client": { + "properties": { + "connect_timeout": { + "default": "5s", + "description": "The maximum amount of time to establish a connection. A value of 0 means using default gRPC client connect timeout 5s.", + "type": "string", + "x-cli-flag": "alertmanager.alertmanager-client.connect-timeout", + "x-format": "duration" + }, + "grpc_compression": { + "description": "Use compression when sending messages. Supported values are: 'gzip', 'snappy' and '' (disable compression)", + "type": "string", + "x-cli-flag": "alertmanager.alertmanager-client.grpc-compression" + }, + "max_recv_msg_size": { + "default": 16777216, + "description": "gRPC client max receive message size (bytes).", + "type": "number", + "x-cli-flag": "alertmanager.alertmanager-client.grpc-max-recv-msg-size" + }, + "max_send_msg_size": { + "default": 4194304, + "description": "gRPC client max send message size (bytes).", + "type": "number", + "x-cli-flag": "alertmanager.alertmanager-client.grpc-max-send-msg-size" + }, + "remote_timeout": { + "default": "2s", + "description": "Timeout for downstream alertmanagers.", + "type": "string", + "x-cli-flag": "alertmanager.alertmanager-client.remote-timeout", + "x-format": "duration" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "alertmanager.alertmanager-client.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "alertmanager.alertmanager-client.tls-cert-path" + }, + "tls_enabled": { + "default": false, + "description": "Enable TLS in the GRPC client. This flag needs to be enabled when any other TLS flag is set. 
If set to false, insecure connection to gRPC server will be used.", + "type": "boolean", + "x-cli-flag": "alertmanager.alertmanager-client.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "alertmanager.alertmanager-client.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "alertmanager.alertmanager-client.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "alertmanager.alertmanager-client.tls-server-name" + } + }, + "type": "object" + }, + "api_concurrency": { + "default": 0, + "description": "Maximum number of concurrent GET API requests before returning an error.", + "type": "number", + "x-cli-flag": "alertmanager.api-concurrency" + }, + "auto_webhook_root": { + "description": "Root of URL to generate if config is http://internal.monitor", + "type": "string", + "x-cli-flag": "alertmanager.configs.auto-webhook-root" + }, + "cluster": { + "properties": { + "advertise_address": { + "description": "Explicit address or hostname to advertise in cluster.", + "type": "string", + "x-cli-flag": "alertmanager.cluster.advertise-address" + }, + "gossip_interval": { + "default": "200ms", + "description": "The interval between sending gossip messages. By lowering this value (more frequent) gossip messages are propagated across cluster more quickly at the expense of increased bandwidth usage.", + "type": "string", + "x-cli-flag": "alertmanager.cluster.gossip-interval", + "x-format": "duration" + }, + "listen_address": { + "default": "0.0.0.0:9094", + "description": "Listen address and port for the cluster. Not specifying this flag disables high-availability mode.", + "type": "string", + "x-cli-flag": "alertmanager.cluster.listen-address" + }, + "peer_timeout": { + "default": "15s", + "description": "Time to wait between peers to send notifications.", + "type": "string", + "x-cli-flag": "alertmanager.cluster.peer-timeout", + "x-format": "duration" + }, + "peers": { + "description": "Comma-separated list of initial peers.", + "type": "string", + "x-cli-flag": "alertmanager.cluster.peers" + }, + "push_pull_interval": { + "default": "1m0s", + "description": "The interval between gossip state syncs. Setting this interval lower (more frequent) will increase convergence speeds across larger clusters at the expense of increased bandwidth usage.", + "type": "string", + "x-cli-flag": "alertmanager.cluster.push-pull-interval", + "x-format": "duration" + } + }, + "type": "object" + }, + "data_dir": { + "default": "data/", + "description": "Base path for data storage.", + "type": "string", + "x-cli-flag": "alertmanager.storage.path" + }, + "disabled_tenants": { + "description": "Comma separated list of tenants whose alerts this alertmanager cannot process. If specified, an alertmanager that would normally pick the specified tenant(s) for processing will ignore them instead.", + "type": "string", + "x-cli-flag": "alertmanager.disabled-tenants" + }, + "enable_api": { + "default": false, + "description": "Enable the experimental alertmanager config api.", + "type": "boolean", + "x-cli-flag": "experimental.alertmanager.enable-api" + }, + "enabled_tenants": { + "description": "Comma separated list of tenants whose alerts this alertmanager can process. 
If specified, only these tenants will be handled by alertmanager, otherwise this alertmanager can process alerts from all tenants.", + "type": "string", + "x-cli-flag": "alertmanager.enabled-tenants" + }, + "external_url": { + "description": "The URL under which Alertmanager is externally reachable (for example, if Alertmanager is served via a reverse proxy). Used for generating relative and absolute links back to Alertmanager itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Alertmanager. If omitted, relevant URL components will be derived automatically.", + "format": "uri", + "type": "string", + "x-cli-flag": "alertmanager.web.external-url" + }, + "fallback_config_file": { + "description": "Filename of fallback config to use if none specified for instance.", + "type": "string", + "x-cli-flag": "alertmanager.configs.fallback" + }, + "gc_interval": { + "default": "30m0s", + "description": "Alertmanager alerts Garbage collection interval.", + "type": "string", + "x-cli-flag": "alertmanager.alerts-gc-interval", + "x-format": "duration" + }, + "max_recv_msg_size": { + "default": 16777216, + "description": "Maximum size (bytes) of an accepted HTTP request body.", + "type": "number", + "x-cli-flag": "alertmanager.max-recv-msg-size" + }, + "persist_interval": { + "default": "15m0s", + "description": "The interval between persisting the current alertmanager state (notification log and silences) to object storage. This is only used when sharding is enabled. This state is read when all replicas for a shard can not be contacted. In this scenario, having persisted the state more frequently will result in potentially fewer lost silences, and fewer duplicate notifications.", + "type": "string", + "x-cli-flag": "alertmanager.persist-interval", + "x-format": "duration" + }, + "poll_interval": { + "default": "15s", + "description": "How frequently to poll Cortex configs", + "type": "string", + "x-cli-flag": "alertmanager.configs.poll-interval", + "x-format": "duration" + }, + "retention": { + "default": "120h0m0s", + "description": "How long to keep data for.", + "type": "string", + "x-cli-flag": "alertmanager.storage.retention", + "x-format": "duration" + }, + "sharding_enabled": { + "default": false, + "description": "Shard tenants across multiple alertmanager instances.", + "type": "boolean", + "x-cli-flag": "alertmanager.sharding-enabled" + }, + "sharding_ring": { + "properties": { + "detailed_metrics_enabled": { + "default": true, + "description": "Set to true to enable ring detailed metrics. These metrics provide detailed information, such as token count and ownership per tenant. Disabling them can significantly decrease the number of metrics emitted.", + "type": "boolean", + "x-cli-flag": "alertmanager.sharding-ring.detailed-metrics-enabled" + }, + "final_sleep": { + "default": "0s", + "description": "The sleep seconds when alertmanager is shutting down. Need to be close to or larger than KV Store information propagation delay", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.final-sleep", + "x-format": "duration" + }, + "heartbeat_period": { + "default": "15s", + "description": "Period at which to heartbeat to the ring. 0 = disabled.", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.heartbeat-period", + "x-format": "duration" + }, + "heartbeat_timeout": { + "default": "1m0s", + "description": "The heartbeat timeout after which alertmanagers are considered unhealthy within the ring. 
0 = never (timeout disabled).", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.heartbeat-timeout", + "x-format": "duration" + }, + "instance_availability_zone": { + "description": "The availability zone where this instance is running. Required if zone-awareness is enabled.", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.instance-availability-zone" + }, + "instance_interface_names": { + "default": "[eth0 en0]", + "description": "Name of network interface to read address from.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "alertmanager.sharding-ring.instance-interface-names" + }, + "keep_instance_in_the_ring_on_shutdown": { + "default": false, + "description": "Keep instance in the ring on shut down.", + "type": "boolean", + "x-cli-flag": "alertmanager.sharding-ring.keep-instance-in-the-ring-on-shutdown" + }, + "kvstore": { + "description": "The key-value store used to share the hash ring across multiple instances.", + "properties": { + "consul": { + "$ref": "#/definitions/consul_config" + }, + "dynamodb": { + "properties": { + "max_cas_retries": { + "default": 10, + "description": "Maximum number of retries for DDB KV CAS.", + "type": "number", + "x-cli-flag": "alertmanager.sharding-ring.dynamodb.max-cas-retries" + }, + "puller_sync_time": { + "default": "1m0s", + "description": "Time to refresh local ring with information on dynamodb.", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.dynamodb.puller-sync-time", + "x-format": "duration" + }, + "region": { + "description": "Region to access dynamodb.", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.dynamodb.region" + }, + "table_name": { + "description": "Table name to use on dynamodb.", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.dynamodb.table-name" + }, + "timeout": { + "default": "2m0s", + "description": "Timeout of dynamoDbClient requests. Default is 2m.", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.dynamodb.timeout", + "x-format": "duration" + }, + "ttl": { + "default": "0s", + "description": "Time to expire items on dynamodb.", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.dynamodb.ttl-time", + "x-format": "duration" + } + }, + "type": "object" + }, + "etcd": { + "$ref": "#/definitions/etcd_config" + }, + "multi": { + "properties": { + "mirror_enabled": { + "default": false, + "description": "Mirror writes to secondary store.", + "type": "boolean", + "x-cli-flag": "alertmanager.sharding-ring.multi.mirror-enabled" + }, + "mirror_timeout": { + "default": "2s", + "description": "Timeout for storing value to secondary store.", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.multi.mirror-timeout", + "x-format": "duration" + }, + "primary": { + "description": "Primary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.multi.primary" + }, + "secondary": { + "description": "Secondary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.multi.secondary" + } + }, + "type": "object" + }, + "prefix": { + "default": "alertmanagers/", + "description": "The prefix for the keys in the store. Should end with a /.", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.prefix" + }, + "store": { + "default": "consul", + "description": "Backend storage to use for the ring. 
Supported values are: consul, dynamodb, etcd, inmemory, memberlist, multi.", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.store" + } + }, + "type": "object" + }, + "replication_factor": { + "default": 3, + "description": "The replication factor to use when sharding the alertmanager.", + "type": "number", + "x-cli-flag": "alertmanager.sharding-ring.replication-factor" + }, + "tokens_file_path": { + "description": "File path where tokens are stored. If empty, tokens are not stored at shutdown and restored at startup.", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.tokens-file-path" + }, + "wait_instance_state_timeout": { + "default": "10m0s", + "description": "Timeout for waiting on alertmanager to become desired state in the ring.", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.wait-instance-state-timeout", + "x-format": "duration" + }, + "zone_awareness_enabled": { + "default": false, + "description": "True to enable zone-awareness and replicate alerts across different availability zones.", + "type": "boolean", + "x-cli-flag": "alertmanager.sharding-ring.zone-awareness-enabled" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "alertmanager_storage_config": { + "description": "The alertmanager_storage_config configures the Cortex alertmanager storage backend.", + "properties": { + "azure": { + "properties": { + "account_key": { + "description": "Azure storage account key", + "type": "string", + "x-cli-flag": "alertmanager-storage.azure.account-key" + }, + "account_name": { + "description": "Azure storage account name", + "type": "string", + "x-cli-flag": "alertmanager-storage.azure.account-name" + }, + "connection_string": { + "description": "The values of `account-name` and `endpoint-suffix` values will not be ignored if `connection-string` is set. Use this method over `account-key` if you need to authenticate via a SAS token or if you use the Azurite emulator.", + "type": "string", + "x-cli-flag": "alertmanager-storage.azure.connection-string" + }, + "container_name": { + "description": "Azure storage container name", + "type": "string", + "x-cli-flag": "alertmanager-storage.azure.container-name" + }, + "endpoint_suffix": { + "description": "Azure storage endpoint suffix without schema. The account name will be prefixed to this value to create the FQDN", + "type": "string", + "x-cli-flag": "alertmanager-storage.azure.endpoint-suffix" + }, + "http": { + "properties": { + "expect_continue_timeout": { + "default": "1s", + "description": "The time to wait for a server's first response headers after fully writing the request headers if the request has an Expect header. 0 to send the request body immediately.", + "type": "string", + "x-cli-flag": "alertmanager-storage.azure.expect-continue-timeout", + "x-format": "duration" + }, + "idle_conn_timeout": { + "default": "1m30s", + "description": "The time an idle connection will remain idle before closing.", + "type": "string", + "x-cli-flag": "alertmanager-storage.azure.http.idle-conn-timeout", + "x-format": "duration" + }, + "insecure_skip_verify": { + "default": false, + "description": "If the client connects via HTTPS and this option is enabled, the client will accept any certificate and hostname.", + "type": "boolean", + "x-cli-flag": "alertmanager-storage.azure.http.insecure-skip-verify" + }, + "max_connections_per_host": { + "default": 0, + "description": "Maximum number of connections per host. 
+ "alertmanager_storage_config": {
+ "description": "The alertmanager_storage_config configures the Cortex alertmanager storage backend.",
+ "properties": {
+ "azure": {
+ "properties": {
+ "account_key": {
+ "description": "Azure storage account key",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.azure.account-key"
+ },
+ "account_name": {
+ "description": "Azure storage account name",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.azure.account-name"
+ },
+ "connection_string": {
+ "description": "The values of `account-name` and `endpoint-suffix` will not be used if `connection-string` is set. Use this method over `account-key` if you need to authenticate via a SAS token or if you use the Azurite emulator.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.azure.connection-string"
+ },
+ "container_name": {
+ "description": "Azure storage container name",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.azure.container-name"
+ },
+ "endpoint_suffix": {
+ "description": "Azure storage endpoint suffix without scheme. The account name will be prefixed to this value to create the FQDN.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.azure.endpoint-suffix"
+ },
+ "http": {
+ "properties": {
+ "expect_continue_timeout": {
+ "default": "1s",
+ "description": "The time to wait for a server's first response headers after fully writing the request headers if the request has an Expect header. 0 to send the request body immediately.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.azure.expect-continue-timeout",
+ "x-format": "duration"
+ },
+ "idle_conn_timeout": {
+ "default": "1m30s",
+ "description": "The time an idle connection will remain idle before closing.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.azure.http.idle-conn-timeout",
+ "x-format": "duration"
+ },
+ "insecure_skip_verify": {
+ "default": false,
+ "description": "If the client connects via HTTPS and this option is enabled, the client will accept any certificate and hostname.",
+ "type": "boolean",
+ "x-cli-flag": "alertmanager-storage.azure.http.insecure-skip-verify"
+ },
+ "max_connections_per_host": {
+ "default": 0,
+ "description": "Maximum number of connections per host. 0 means no limit.",
+ "type": "number",
+ "x-cli-flag": "alertmanager-storage.azure.max-connections-per-host"
+ },
+ "max_idle_connections": {
+ "default": 100,
+ "description": "Maximum number of idle (keep-alive) connections across all hosts. 0 means no limit.",
+ "type": "number",
+ "x-cli-flag": "alertmanager-storage.azure.max-idle-connections"
+ },
+ "max_idle_connections_per_host": {
+ "default": 100,
+ "description": "Maximum number of idle (keep-alive) connections to keep per-host. If 0, a built-in default value is used.",
+ "type": "number",
+ "x-cli-flag": "alertmanager-storage.azure.max-idle-connections-per-host"
+ },
+ "response_header_timeout": {
+ "default": "2m0s",
+ "description": "The amount of time the client will wait for a server's response headers.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.azure.http.response-header-timeout",
+ "x-format": "duration"
+ },
+ "tls_handshake_timeout": {
+ "default": "10s",
+ "description": "Maximum time to wait for a TLS handshake. 0 means no limit.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.azure.tls-handshake-timeout",
+ "x-format": "duration"
+ }
+ },
+ "type": "object"
+ },
+ "max_retries": {
+ "default": 20,
+ "description": "Number of retries for recoverable errors.",
+ "type": "number",
+ "x-cli-flag": "alertmanager-storage.azure.max-retries"
+ },
+ "msi_resource": {
+ "description": "Deprecated: Azure storage MSI resource. It will be set automatically by the Azure SDK.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.azure.msi-resource"
+ },
+ "user_assigned_id": {
+ "description": "Azure storage MSI resource managed identity client ID. If not supplied, the default Azure credential will be used. Set it to empty if you need to authenticate via Azure Workload Identity.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.azure.user-assigned-id"
+ }
+ },
+ "type": "object"
+ },
+ "backend": {
+ "default": "s3",
+ "description": "Backend storage to use. Supported backends are: s3, gcs, azure, swift, filesystem, configdb, local.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.backend"
+ },
+ "configdb": {
+ "$ref": "#/definitions/configstore_config"
+ },
+ "filesystem": {
+ "properties": {
+ "dir": {
+ "description": "Local filesystem storage directory.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.filesystem.dir"
+ }
+ },
+ "type": "object"
+ },
+ "gcs": {
+ "properties": {
+ "bucket_name": {
+ "description": "GCS bucket name",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.gcs.bucket-name"
+ },
+ "service_account": {
+ "description": "JSON representing either a Google Developers Console client_credentials.json file or a Google Developers service account key file. If empty, fall back to the Google default logic.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.gcs.service-account"
+ }
+ },
+ "type": "object"
+ },
+ "local": {
+ "properties": {
+ "path": {
+ "description": "Path at which alertmanager configurations are stored.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.local.path"
+ }
+ },
+ "type": "object"
+ },
+ "s3": {
+ "properties": {
+ "access_key_id": {
+ "description": "S3 access key ID",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.s3.access-key-id"
+ },
+ "bucket_lookup_type": {
+ "default": "auto",
+ "description": "The S3 bucket lookup style. Supported values are: auto, virtual-hosted, path.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.s3.bucket-lookup-type"
+ },
+ "bucket_name": {
+ "description": "S3 bucket name",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.s3.bucket-name"
+ },
+ "disable_dualstack": {
+ "default": false,
+ "description": "If enabled, the S3 endpoint will use the non-dualstack variant.",
+ "type": "boolean",
+ "x-cli-flag": "alertmanager-storage.s3.disable-dualstack"
+ },
+ "endpoint": {
+ "description": "The S3 bucket endpoint. It could be an AWS S3 endpoint listed at https://docs.aws.amazon.com/general/latest/gr/s3.html or the address of an S3-compatible service in hostname:port format.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.s3.endpoint"
+ },
+ "http": {
+ "properties": {
+ "expect_continue_timeout": {
+ "default": "1s",
+ "description": "The time to wait for a server's first response headers after fully writing the request headers if the request has an Expect header. 0 to send the request body immediately.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.s3.expect-continue-timeout",
+ "x-format": "duration"
+ },
+ "idle_conn_timeout": {
+ "default": "1m30s",
+ "description": "The time an idle connection will remain idle before closing.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.s3.http.idle-conn-timeout",
+ "x-format": "duration"
+ },
+ "insecure_skip_verify": {
+ "default": false,
+ "description": "If the client connects via HTTPS and this option is enabled, the client will accept any certificate and hostname.",
+ "type": "boolean",
+ "x-cli-flag": "alertmanager-storage.s3.http.insecure-skip-verify"
+ },
+ "max_connections_per_host": {
+ "default": 0,
+ "description": "Maximum number of connections per host. 0 means no limit.",
+ "type": "number",
+ "x-cli-flag": "alertmanager-storage.s3.max-connections-per-host"
+ },
+ "max_idle_connections": {
+ "default": 100,
+ "description": "Maximum number of idle (keep-alive) connections across all hosts. 0 means no limit.",
+ "type": "number",
+ "x-cli-flag": "alertmanager-storage.s3.max-idle-connections"
+ },
+ "max_idle_connections_per_host": {
+ "default": 100,
+ "description": "Maximum number of idle (keep-alive) connections to keep per-host. If 0, a built-in default value is used.",
+ "type": "number",
+ "x-cli-flag": "alertmanager-storage.s3.max-idle-connections-per-host"
+ },
+ "response_header_timeout": {
+ "default": "2m0s",
+ "description": "The amount of time the client will wait for a server's response headers.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.s3.http.response-header-timeout",
+ "x-format": "duration"
+ },
+ "tls_handshake_timeout": {
+ "default": "10s",
+ "description": "Maximum time to wait for a TLS handshake. 0 means no limit.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.s3.tls-handshake-timeout",
+ "x-format": "duration"
+ }
+ },
+ "type": "object"
+ },
+ "insecure": {
+ "default": false,
+ "description": "If enabled, use http:// for the S3 endpoint instead of https://. This could be useful in local dev/test environments while using an S3-compatible backend storage, like Minio.",
+ "type": "boolean",
+ "x-cli-flag": "alertmanager-storage.s3.insecure"
+ },
+ "list_objects_version": {
+ "description": "The list api version. Supported values are: v1, v2, and ''.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.s3.list-objects-version"
+ },
+ "region": {
+ "description": "S3 region. If unset, the client will issue an S3 GetBucketLocation API call to autodetect it.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.s3.region"
+ },
+ "secret_access_key": {
+ "description": "S3 secret access key",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.s3.secret-access-key"
+ },
+ "send_content_md5": {
+ "default": true,
+ "description": "If true, attach an MD5 checksum when uploading objects, and S3 uses the MD5 checksum algorithm to verify the provided digest. If false, use the CRC32C algorithm instead.",
+ "type": "boolean",
+ "x-cli-flag": "alertmanager-storage.s3.send-content-md5"
+ },
+ "signature_version": {
+ "default": "v4",
+ "description": "The signature version to use for authenticating against S3. Supported values are: v4, v2.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.s3.signature-version"
+ },
+ "sse": {
+ "$ref": "#/definitions/s3_sse_config"
+ }
+ },
+ "type": "object"
+ },
+ "swift": {
+ "properties": {
+ "application_credential_id": {
+ "description": "OpenStack Swift application credential ID.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.swift.application-credential-id"
+ },
+ "application_credential_name": {
+ "description": "OpenStack Swift application credential name.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.swift.application-credential-name"
+ },
+ "application_credential_secret": {
+ "description": "OpenStack Swift application credential secret.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.swift.application-credential-secret"
+ },
+ "auth_url": {
+ "description": "OpenStack Swift authentication URL",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.swift.auth-url"
+ },
+ "auth_version": {
+ "default": 0,
+ "description": "OpenStack Swift authentication API version. 0 to autodetect.",
+ "type": "number",
+ "x-cli-flag": "alertmanager-storage.swift.auth-version"
+ },
+ "connect_timeout": {
+ "default": "10s",
+ "description": "Time after which a connection attempt is aborted.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.swift.connect-timeout",
+ "x-format": "duration"
+ },
+ "container_name": {
+ "description": "Name of the OpenStack Swift container to put chunks in.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.swift.container-name"
+ },
+ "domain_id": {
+ "description": "OpenStack Swift user's domain ID.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.swift.domain-id"
+ },
+ "domain_name": {
+ "description": "OpenStack Swift user's domain name.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.swift.domain-name"
+ },
+ "max_retries": {
+ "default": 3,
+ "description": "Max retries on request errors.",
+ "type": "number",
+ "x-cli-flag": "alertmanager-storage.swift.max-retries"
+ },
+ "password": {
+ "description": "OpenStack Swift API key.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.swift.password"
+ },
+ "project_domain_id": {
+ "description": "ID of the OpenStack Swift project's domain (v3 auth only), only needed if it differs from the user domain.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.swift.project-domain-id"
+ },
+ "project_domain_name": {
+ "description": "Name of the OpenStack Swift project's domain (v3 auth only), only needed if it differs from the user domain.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.swift.project-domain-name"
+ },
+ "project_id": {
+ "description": "OpenStack Swift project ID (v2,v3 auth only).",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.swift.project-id"
+ },
+ "project_name": {
+ "description": "OpenStack Swift project name (v2,v3 auth only).",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.swift.project-name"
+ },
+ "region_name": {
+ "description": "OpenStack Swift Region to use (v2,v3 auth only).",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.swift.region-name"
+ },
+ "request_timeout": {
+ "default": "5s",
+ "description": "Time after which an idle request is aborted. The timeout watchdog is reset each time some data is received, so the timeout only triggers after no data has been received on a request for this duration.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.swift.request-timeout",
+ "x-format": "duration"
+ },
+ "user_domain_id": {
+ "description": "OpenStack Swift user's domain ID.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.swift.user-domain-id"
+ },
+ "user_domain_name": {
+ "description": "OpenStack Swift user's domain name.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.swift.user-domain-name"
+ },
+ "user_id": {
+ "description": "OpenStack Swift user ID.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.swift.user-id"
+ },
+ "username": {
+ "description": "OpenStack Swift username.",
+ "type": "string",
+ "x-cli-flag": "alertmanager-storage.swift.username"
+ }
+ },
+ "type": "object"
+ }
+ },
+ "type": "object"
+ },
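These definitions map to the `alertmanager_storage` block of the Cortex YAML configuration. A minimal S3-backed sketch; the endpoint, region and bucket name are illustrative assumptions, not recommendations:

    alertmanager_storage:
      backend: s3
      s3:
        endpoint: s3.us-east-1.amazonaws.com
        region: us-east-1
        bucket_name: cortex-alertmanager
        signature_version: v4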
+ "blocks_storage_config": {
+ "description": "The blocks_storage_config configures the blocks storage.",
+ "properties": {
+ "azure": {
+ "properties": {
+ "account_key": {
+ "description": "Azure storage account key",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.azure.account-key"
+ },
+ "account_name": {
+ "description": "Azure storage account name",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.azure.account-name"
+ },
+ "connection_string": {
+ "description": "The values of `account-name` and `endpoint-suffix` will not be used if `connection-string` is set. Use this method over `account-key` if you need to authenticate via a SAS token or if you use the Azurite emulator.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.azure.connection-string"
+ },
+ "container_name": {
+ "description": "Azure storage container name",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.azure.container-name"
+ },
+ "endpoint_suffix": {
+ "description": "Azure storage endpoint suffix without scheme. The account name will be prefixed to this value to create the FQDN.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.azure.endpoint-suffix"
+ },
+ "http": {
+ "properties": {
+ "expect_continue_timeout": {
+ "default": "1s",
+ "description": "The time to wait for a server's first response headers after fully writing the request headers if the request has an Expect header. 0 to send the request body immediately.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.azure.expect-continue-timeout",
+ "x-format": "duration"
+ },
+ "idle_conn_timeout": {
+ "default": "1m30s",
+ "description": "The time an idle connection will remain idle before closing.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.azure.http.idle-conn-timeout",
+ "x-format": "duration"
+ },
+ "insecure_skip_verify": {
+ "default": false,
+ "description": "If the client connects via HTTPS and this option is enabled, the client will accept any certificate and hostname.",
+ "type": "boolean",
+ "x-cli-flag": "blocks-storage.azure.http.insecure-skip-verify"
+ },
+ "max_connections_per_host": {
+ "default": 0,
+ "description": "Maximum number of connections per host. 0 means no limit.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.azure.max-connections-per-host"
+ },
+ "max_idle_connections": {
+ "default": 100,
+ "description": "Maximum number of idle (keep-alive) connections across all hosts. 0 means no limit.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.azure.max-idle-connections"
+ },
+ "max_idle_connections_per_host": {
+ "default": 100,
+ "description": "Maximum number of idle (keep-alive) connections to keep per-host. If 0, a built-in default value is used.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.azure.max-idle-connections-per-host"
+ },
+ "response_header_timeout": {
+ "default": "2m0s",
+ "description": "The amount of time the client will wait for a server's response headers.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.azure.http.response-header-timeout",
+ "x-format": "duration"
+ },
+ "tls_handshake_timeout": {
+ "default": "10s",
+ "description": "Maximum time to wait for a TLS handshake. 0 means no limit.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.azure.tls-handshake-timeout",
+ "x-format": "duration"
+ }
+ },
+ "type": "object"
+ },
+ "max_retries": {
+ "default": 20,
+ "description": "Number of retries for recoverable errors.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.azure.max-retries"
+ },
+ "msi_resource": {
+ "description": "Deprecated: Azure storage MSI resource. It will be set automatically by the Azure SDK.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.azure.msi-resource"
+ },
+ "user_assigned_id": {
+ "description": "Azure storage MSI resource managed identity client ID. If not supplied, the default Azure credential will be used. Set it to empty if you need to authenticate via Azure Workload Identity.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.azure.user-assigned-id"
+ }
+ },
+ "type": "object"
+ },
+ "backend": {
+ "default": "s3",
+ "description": "Backend storage to use. Supported backends are: s3, gcs, azure, swift, filesystem.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.backend"
+ },
+ "bucket_store": {
+ "description": "This configures how the querier and store-gateway discover and synchronize blocks stored in the bucket.",
+ "properties": {
+ "block_discovery_strategy": {
+ "default": "concurrent",
+ "description": "One of concurrent, recursive, bucket_index. When set to concurrent, stores will concurrently issue one call per directory to discover active blocks in the bucket. The recursive strategy iterates through all objects in the bucket, recursively traversing into each directory. This avoids N+1 calls at the expense of having slower bucket iterations. The bucket_index strategy can be used in the Compactor only and utilizes the existing bucket index to fetch block IDs to sync. This avoids iterating the bucket but can be impacted by delays in the cleaner creating the bucket index.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.block-discovery-strategy"
+ },
+ "block_sync_concurrency": {
+ "default": 20,
+ "description": "Maximum number of concurrent blocks syncing per tenant.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.block-sync-concurrency"
+ },
+ "bucket_index": {
+ "properties": {
+ "enabled": {
+ "default": false,
+ "description": "True to enable the querier and store-gateway to discover blocks in the storage via the bucket index instead of bucket scanning.",
+ "type": "boolean",
+ "x-cli-flag": "blocks-storage.bucket-store.bucket-index.enabled"
+ },
+ "idle_timeout": {
+ "default": "1h0m0s",
+ "description": "How long an unused bucket index should be cached. Once this timeout expires, the unused bucket index is removed from the in-memory cache. This option is used only by the querier.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.bucket-index.idle-timeout",
+ "x-format": "duration"
+ },
+ "max_stale_period": {
+ "default": "1h0m0s",
+ "description": "The maximum allowed age of a bucket index (last updated) before queries start failing because the bucket index is too old. The bucket index is periodically updated by the compactor, while this check is enforced in the querier (at query time).",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.bucket-index.max-stale-period",
+ "x-format": "duration"
+ },
+ "update_on_error_interval": {
+ "default": "1m0s",
+ "description": "How frequently a bucket index that previously failed to load should be retried. This option is used only by the querier.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.bucket-index.update-on-error-interval",
+ "x-format": "duration"
+ }
+ },
+ "type": "object"
+ },
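With the bucket index enabled, queriers and store-gateways discover blocks from the compactor-maintained index instead of scanning the bucket object-by-object. A minimal sketch (the 1h staleness value is illustrative):

    blocks_storage:
      bucket_store:
        bucket_index:
          enabled: true
          max_stale_period: 1h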
+ "chunks_cache": {
+ "properties": {
+ "attributes_ttl": {
+ "default": "168h0m0s",
+ "description": "TTL for caching object attributes for chunks.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.attributes-ttl",
+ "x-format": "duration"
+ },
+ "backend": {
+ "description": "The chunks cache backend type. A single cache backend or multiple cache backends can be provided. Supported values in a single cache: memcached, redis, inmemory, and '' (disable). Supported values in a multi-level cache: a comma-separated list of (inmemory, memcached, redis).",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.backend"
+ },
+ "inmemory": {
+ "properties": {
+ "max_size_bytes": {
+ "default": 1073741824,
+ "description": "Maximum size in bytes of in-memory chunks cache used (shared between all tenants).",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.inmemory.max-size-bytes"
+ }
+ },
+ "type": "object"
+ },
+ "max_get_range_requests": {
+ "default": 3,
+ "description": "Maximum number of sub-GetRange requests that a single GetRange request can be split into when fetching chunks. Zero or negative value = unlimited number of sub-requests.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.max-get-range-requests"
+ },
+ "memcached": {
+ "properties": {
+ "addresses": {
+ "description": "Comma-separated list of memcached addresses. Supported prefixes are: dns+ (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query), dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after that).",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.addresses"
+ },
+ "auto_discovery": {
+ "default": false,
+ "description": "Use the memcached auto-discovery mechanism provided by some cloud providers, like GCP and AWS.",
+ "type": "boolean",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.auto-discovery"
+ },
+ "max_async_buffer_size": {
+ "default": 10000,
+ "description": "The maximum number of enqueued asynchronous operations allowed.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.max-async-buffer-size"
+ },
+ "max_async_concurrency": {
+ "default": 3,
+ "description": "The maximum number of concurrent asynchronous operations that can occur.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.max-async-concurrency"
+ },
+ "max_get_multi_batch_size": {
+ "default": 0,
+ "description": "The maximum number of keys a single underlying get operation should fetch. If more keys are specified, internally keys are split into multiple batches and fetched concurrently, honoring the max concurrency. If set to 0, the max batch size is unlimited.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-batch-size"
+ },
+ "max_get_multi_concurrency": {
+ "default": 100,
+ "description": "The maximum number of concurrent connections running get operations. If set to 0, concurrency is unlimited.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency"
+ },
+ "max_idle_connections": {
+ "default": 16,
+ "description": "The maximum number of idle connections that will be maintained per address.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections"
+ },
+ "max_item_size": {
+ "default": 1048576,
+ "description": "The maximum size of an item stored in memcached. Bigger items are not stored. If set to 0, no maximum size is enforced.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.max-item-size"
+ },
+ "set_async_circuit_breaker_config": {
+ "properties": {
+ "consecutive_failures": {
+ "default": 5,
+ "description": "Consecutive failures to determine if the circuit breaker should open.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.set-async.circuit-breaker.consecutive-failures"
+ },
+ "enabled": {
+ "default": false,
+ "description": "If true, enable circuit breaker.",
+ "type": "boolean",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.set-async.circuit-breaker.enabled"
+ },
+ "failure_percent": {
+ "default": 0.05,
+ "description": "Failure percentage to determine if the circuit breaker should open.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.set-async.circuit-breaker.failure-percent"
+ },
+ "half_open_max_requests": {
+ "default": 10,
+ "description": "Maximum number of requests allowed to pass through when the circuit breaker is half-open. If set to 0, 1 request is allowed by default.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.set-async.circuit-breaker.half-open-max-requests"
+ },
+ "min_requests": {
+ "default": 50,
+ "description": "Minimum number of requests needed to trigger the circuit breaker.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.set-async.circuit-breaker.min-requests"
+ },
+ "open_duration": {
+ "default": "5s",
+ "description": "Period of the open state after which the state of the circuit breaker becomes half-open. If set to 0, the open duration defaults to 60 seconds.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.set-async.circuit-breaker.open-duration",
+ "x-format": "duration"
+ }
+ },
+ "type": "object"
+ },
+ "timeout": {
+ "default": "100ms",
+ "description": "The socket read/write timeout.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.timeout",
+ "x-format": "duration"
+ }
+ },
+ "type": "object"
+ },
+ "multilevel": {
+ "properties": {
+ "max_async_buffer_size": {
+ "default": 10000,
+ "description": "The maximum number of enqueued asynchronous operations allowed when backfilling cache items.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.multilevel.max-async-buffer-size"
+ },
+ "max_async_concurrency": {
+ "default": 3,
+ "description": "The maximum number of concurrent asynchronous operations that can occur when backfilling cache items.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.multilevel.max-async-concurrency"
+ },
+ "max_backfill_items": {
+ "default": 10000,
+ "description": "The maximum number of items to backfill per asynchronous operation.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.multilevel.max-backfill-items"
+ }
+ },
+ "type": "object"
+ },
+ "redis": {
+ "properties": {
+ "addresses": {
+ "description": "Comma-separated list of redis addresses. Supported prefixes are: dns+ (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query), dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after that).",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.addresses"
+ },
+ "cache_size": {
+ "default": 0,
+ "description": "If not zero, then client-side caching is enabled. Client-side caching is when data is stored in memory instead of fetching data each time. See https://redis.io/docs/manual/client-side-caching/ for more info.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.cache-size"
+ },
+ "db": {
+ "default": 0,
+ "description": "Database to be selected after connecting to the server.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.db"
+ },
+ "dial_timeout": {
+ "default": "5s",
+ "description": "Client dial timeout.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.dial-timeout",
+ "x-format": "duration"
+ },
+ "get_multi_batch_size": {
+ "default": 100,
+ "description": "The maximum size per batch for mget.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.get-multi-batch-size"
+ },
+ "master_name": {
+ "description": "Specifies the master's name. Must not be empty when using Redis Sentinel.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.master-name"
+ },
+ "max_async_buffer_size": {
+ "default": 10000,
+ "description": "The maximum number of enqueued asynchronous operations allowed.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.max-async-buffer-size"
+ },
+ "max_async_concurrency": {
+ "default": 3,
+ "description": "The maximum number of concurrent asynchronous operations that can occur.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.max-async-concurrency"
+ },
+ "max_get_multi_concurrency": {
+ "default": 100,
+ "description": "The maximum number of concurrent GetMulti() operations. If set to 0, concurrency is unlimited.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.max-get-multi-concurrency"
+ },
+ "max_set_multi_concurrency": {
+ "default": 100,
+ "description": "The maximum number of concurrent SetMulti() operations. If set to 0, concurrency is unlimited.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.max-set-multi-concurrency"
+ },
+ "password": {
+ "description": "Redis password.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.password"
+ },
+ "read_timeout": {
+ "default": "3s",
+ "description": "Client read timeout.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.read-timeout",
+ "x-format": "duration"
+ },
+ "set_async_circuit_breaker_config": {
+ "properties": {
+ "consecutive_failures": {
+ "default": 5,
+ "description": "Consecutive failures to determine if the circuit breaker should open.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.set-async.circuit-breaker.consecutive-failures"
+ },
+ "enabled": {
+ "default": false,
+ "description": "If true, enable circuit breaker.",
+ "type": "boolean",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.set-async.circuit-breaker.enabled"
+ },
+ "failure_percent": {
+ "default": 0.05,
+ "description": "Failure percentage to determine if the circuit breaker should open.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.set-async.circuit-breaker.failure-percent"
+ },
+ "half_open_max_requests": {
+ "default": 10,
+ "description": "Maximum number of requests allowed to pass through when the circuit breaker is half-open. If set to 0, 1 request is allowed by default.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.set-async.circuit-breaker.half-open-max-requests"
+ },
+ "min_requests": {
+ "default": 50,
+ "description": "Minimum number of requests needed to trigger the circuit breaker.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.set-async.circuit-breaker.min-requests"
+ },
+ "open_duration": {
+ "default": "5s",
+ "description": "Period of the open state after which the state of the circuit breaker becomes half-open. If set to 0, the open duration defaults to 60 seconds.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.set-async.circuit-breaker.open-duration",
+ "x-format": "duration"
+ }
+ },
+ "type": "object"
+ },
+ "set_multi_batch_size": {
+ "default": 100,
+ "description": "The maximum size per batch for pipeline set.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.set-multi-batch-size"
+ },
+ "tls_ca_path": {
+ "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.tls-ca-path"
+ },
+ "tls_cert_path": {
+ "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.tls-cert-path"
+ },
+ "tls_enabled": {
+ "default": false,
+ "description": "Whether to enable TLS for the redis connection.",
+ "type": "boolean",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.tls-enabled"
+ },
+ "tls_insecure_skip_verify": {
+ "default": false,
+ "description": "Skip validating server certificate.",
+ "type": "boolean",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.tls-insecure-skip-verify"
+ },
+ "tls_key_path": {
+ "description": "Path to the key file for the client certificate. Also requires the client certificate to be configured.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.tls-key-path"
+ },
+ "tls_server_name": {
+ "description": "Override the expected name on the server certificate.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.tls-server-name"
+ },
+ "username": {
+ "description": "Redis username.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.username"
+ },
+ "write_timeout": {
+ "default": "3s",
+ "description": "Client write timeout.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.write-timeout",
+ "x-format": "duration"
+ }
+ },
+ "type": "object"
+ },
+ "subrange_size": {
+ "default": 16000,
+ "description": "Size of each subrange that a bucket object is split into for better caching.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.subrange-size"
+ },
+ "subrange_ttl": {
+ "default": "24h0m0s",
+ "description": "TTL for caching individual chunks subranges.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.subrange-ttl",
+ "x-format": "duration"
+ }
+ },
+ "type": "object"
+ },
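The `dns+`/`dnssrv+`/`dnssrvnoa+` prefixes above control how cache addresses are resolved. A chunks cache backed by memcached discovered through a SRV record might be sketched as follows (the service name is an illustrative assumption):

    blocks_storage:
      bucket_store:
        chunks_cache:
          backend: memcached
          memcached:
            addresses: dnssrv+_memcached._tcp.memcached.cortex.svc.cluster.local
            timeout: 100ms
            max_idle_connections: 16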
+ "consistency_delay": {
+ "default": "0s",
+ "description": "Minimum age of a block before it is read. Set it to a safe value (e.g. 30m) if your object storage is eventually consistent. GCS and S3 are (roughly) strongly consistent.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.consistency-delay",
+ "x-format": "duration"
+ },
+ "ignore_blocks_before": {
+ "default": "0s",
+ "description": "The blocks created before `now() - ignore_blocks_before` will not be synced. 0 to disable.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.ignore-blocks-before",
+ "x-format": "duration"
+ },
+ "ignore_blocks_within": {
+ "default": "0s",
+ "description": "The blocks created since `now() - ignore_blocks_within` will not be synced. This should be used together with `-querier.query-store-after` to filter out the blocks that are too new to be queried. A reasonable value for this flag would be `-querier.query-store-after - blocks-storage.bucket-store.bucket-index.max-stale-period` to give some buffer. 0 to disable.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.ignore-blocks-within",
+ "x-format": "duration"
+ },
+ "ignore_deletion_mark_delay": {
+ "default": "6h0m0s",
+ "description": "Duration after which the blocks marked for deletion will be filtered out while fetching blocks. The idea of ignore-deletion-marks-delay is to ignore blocks that are marked for deletion with some delay. This ensures the store can still serve blocks that are meant to be deleted but do not have a replacement yet. Default is 6h, half of the default value for -compactor.deletion-delay.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.ignore-deletion-marks-delay",
+ "x-format": "duration"
+ },
+ "index_cache": {
+ "properties": {
+ "backend": {
+ "default": "inmemory",
+ "description": "The index cache backend type. Multiple cache backends can be provided as a comma-separated ordered list to enable a cache hierarchy. Supported values: inmemory, memcached, redis.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.backend"
+ },
+ "inmemory": {
+ "properties": {
+ "enabled_items": {
+ "default": [],
+ "description": "Selectively cache index item types. Supported values are Postings, ExpandedPostings and Series.",
+ "items": {
+ "type": "string"
+ },
+ "type": "array",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.inmemory.enabled-items"
+ },
+ "max_size_bytes": {
+ "default": 1073741824,
+ "description": "Maximum size in bytes of in-memory index cache used to speed up blocks index lookups (shared between all tenants).",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.inmemory.max-size-bytes"
+ }
+ },
+ "type": "object"
+ },
+ "memcached": {
+ "properties": {
+ "addresses": {
+ "description": "Comma-separated list of memcached addresses. Supported prefixes are: dns+ (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query), dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after that).",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.addresses"
+ },
+ "auto_discovery": {
+ "default": false,
+ "description": "Use the memcached auto-discovery mechanism provided by some cloud providers, like GCP and AWS.",
+ "type": "boolean",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.auto-discovery"
+ },
+ "enabled_items": {
+ "default": [],
+ "description": "Selectively cache index item types. Supported values are Postings, ExpandedPostings and Series.",
+ "items": {
+ "type": "string"
+ },
+ "type": "array",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.enabled-items"
+ },
+ "max_async_buffer_size": {
+ "default": 10000,
+ "description": "The maximum number of enqueued asynchronous operations allowed.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.max-async-buffer-size"
+ },
+ "max_async_concurrency": {
+ "default": 3,
+ "description": "The maximum number of concurrent asynchronous operations that can occur.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency"
+ },
+ "max_get_multi_batch_size": {
+ "default": 0,
+ "description": "The maximum number of keys a single underlying get operation should fetch. If more keys are specified, internally keys are split into multiple batches and fetched concurrently, honoring the max concurrency. If set to 0, the max batch size is unlimited.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.max-get-multi-batch-size"
+ },
+ "max_get_multi_concurrency": {
+ "default": 100,
+ "description": "The maximum number of concurrent connections running get operations. If set to 0, concurrency is unlimited.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency"
+ },
+ "max_idle_connections": {
+ "default": 16,
+ "description": "The maximum number of idle connections that will be maintained per address.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.max-idle-connections"
+ },
+ "max_item_size": {
+ "default": 1048576,
+ "description": "The maximum size of an item stored in memcached. Bigger items are not stored. If set to 0, no maximum size is enforced.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.max-item-size"
+ },
+ "set_async_circuit_breaker_config": {
+ "properties": {
+ "consecutive_failures": {
+ "default": 5,
+ "description": "Consecutive failures to determine if the circuit breaker should open.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.set-async.circuit-breaker.consecutive-failures"
+ },
+ "enabled": {
+ "default": false,
+ "description": "If true, enable circuit breaker.",
+ "type": "boolean",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.set-async.circuit-breaker.enabled"
+ },
+ "failure_percent": {
+ "default": 0.05,
+ "description": "Failure percentage to determine if the circuit breaker should open.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.set-async.circuit-breaker.failure-percent"
+ },
+ "half_open_max_requests": {
+ "default": 10,
+ "description": "Maximum number of requests allowed to pass through when the circuit breaker is half-open. If set to 0, 1 request is allowed by default.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.set-async.circuit-breaker.half-open-max-requests"
+ },
+ "min_requests": {
+ "default": 50,
+ "description": "Minimum number of requests needed to trigger the circuit breaker.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.set-async.circuit-breaker.min-requests"
+ },
+ "open_duration": {
+ "default": "5s",
+ "description": "Period of the open state after which the state of the circuit breaker becomes half-open. If set to 0, the open duration defaults to 60 seconds.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.set-async.circuit-breaker.open-duration",
+ "x-format": "duration"
+ }
+ },
+ "type": "object"
+ },
+ "timeout": {
+ "default": "100ms",
+ "description": "The socket read/write timeout.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.timeout",
+ "x-format": "duration"
+ }
+ },
+ "type": "object"
+ },
+ "multilevel": {
+ "properties": {
+ "max_async_buffer_size": {
+ "default": 10000,
+ "description": "The maximum number of enqueued asynchronous operations allowed when backfilling cache items.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.multilevel.max-async-buffer-size"
+ },
+ "max_async_concurrency": {
+ "default": 3,
+ "description": "The maximum number of concurrent asynchronous operations that can occur when backfilling cache items.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.multilevel.max-async-concurrency"
+ },
+ "max_backfill_items": {
+ "default": 10000,
+ "description": "The maximum number of items to backfill per asynchronous operation.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.multilevel.max-backfill-items"
+ }
+ },
+ "type": "object"
+ },
+ "redis": {
+ "properties": {
+ "addresses": {
+ "description": "Comma-separated list of redis addresses. Supported prefixes are: dns+ (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query), dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after that).",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.addresses"
+ },
+ "cache_size": {
+ "default": 0,
+ "description": "If not zero, then client-side caching is enabled. Client-side caching is when data is stored in memory instead of fetching data each time. See https://redis.io/docs/manual/client-side-caching/ for more info.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.cache-size"
+ },
+ "db": {
+ "default": 0,
+ "description": "Database to be selected after connecting to the server.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.db"
+ },
+ "dial_timeout": {
+ "default": "5s",
+ "description": "Client dial timeout.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.dial-timeout",
+ "x-format": "duration"
+ },
+ "enabled_items": {
+ "default": [],
+ "description": "Selectively cache index item types. Supported values are Postings, ExpandedPostings and Series.",
+ "items": {
+ "type": "string"
+ },
+ "type": "array",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.enabled-items"
+ },
+ "get_multi_batch_size": {
+ "default": 100,
+ "description": "The maximum size per batch for mget.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.get-multi-batch-size"
+ },
+ "master_name": {
+ "description": "Specifies the master's name. Must not be empty when using Redis Sentinel.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.master-name"
+ },
+ "max_async_buffer_size": {
+ "default": 10000,
+ "description": "The maximum number of enqueued asynchronous operations allowed.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.max-async-buffer-size"
+ },
+ "max_async_concurrency": {
+ "default": 3,
+ "description": "The maximum number of concurrent asynchronous operations that can occur.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.max-async-concurrency"
+ },
+ "max_get_multi_concurrency": {
+ "default": 100,
+ "description": "The maximum number of concurrent GetMulti() operations. If set to 0, concurrency is unlimited.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.max-get-multi-concurrency"
+ },
+ "max_set_multi_concurrency": {
+ "default": 100,
+ "description": "The maximum number of concurrent SetMulti() operations. If set to 0, concurrency is unlimited.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.max-set-multi-concurrency"
+ },
+ "password": {
+ "description": "Redis password.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.password"
+ },
+ "read_timeout": {
+ "default": "3s",
+ "description": "Client read timeout.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.read-timeout",
+ "x-format": "duration"
+ },
+ "set_async_circuit_breaker_config": {
+ "properties": {
+ "consecutive_failures": {
+ "default": 5,
+ "description": "Consecutive failures to determine if the circuit breaker should open.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.set-async.circuit-breaker.consecutive-failures"
+ },
+ "enabled": {
+ "default": false,
+ "description": "If true, enable circuit breaker.",
+ "type": "boolean",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.set-async.circuit-breaker.enabled"
+ },
+ "failure_percent": {
+ "default": 0.05,
+ "description": "Failure percentage to determine if the circuit breaker should open.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.set-async.circuit-breaker.failure-percent"
+ },
+ "half_open_max_requests": {
+ "default": 10,
+ "description": "Maximum number of requests allowed to pass through when the circuit breaker is half-open. If set to 0, 1 request is allowed by default.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.set-async.circuit-breaker.half-open-max-requests"
+ },
+ "min_requests": {
+ "default": 50,
+ "description": "Minimum number of requests needed to trigger the circuit breaker.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.set-async.circuit-breaker.min-requests"
+ },
+ "open_duration": {
+ "default": "5s",
+ "description": "Period of the open state after which the state of the circuit breaker becomes half-open. If set to 0, the open duration defaults to 60 seconds.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.set-async.circuit-breaker.open-duration",
+ "x-format": "duration"
+ }
+ },
+ "type": "object"
+ },
+ "set_multi_batch_size": {
+ "default": 100,
+ "description": "The maximum size per batch for pipeline set.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.set-multi-batch-size"
+ },
+ "tls_ca_path": {
+ "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.tls-ca-path"
+ },
+ "tls_cert_path": {
+ "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.tls-cert-path"
+ },
+ "tls_enabled": {
+ "default": false,
+ "description": "Whether to enable TLS for the redis connection.",
+ "type": "boolean",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.tls-enabled"
+ },
+ "tls_insecure_skip_verify": {
+ "default": false,
+ "description": "Skip validating server certificate.",
+ "type": "boolean",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.tls-insecure-skip-verify"
+ },
+ "tls_key_path": {
+ "description": "Path to the key file for the client certificate. Also requires the client certificate to be configured.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.tls-key-path"
+ },
+ "tls_server_name": {
+ "description": "Override the expected name on the server certificate.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.tls-server-name"
+ },
+ "username": {
+ "description": "Redis username.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.username"
+ },
+ "write_timeout": {
+ "default": "3s",
+ "description": "Client write timeout.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.write-timeout",
+ "x-format": "duration"
+ }
+ },
+ "type": "object"
+ }
+ },
+ "type": "object"
+ },
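Because the index cache backend accepts a comma-separated ordered list, a two-level hierarchy with a small in-memory cache in front of memcached can be sketched like this (the address and size are illustrative assumptions):

    blocks_storage:
      bucket_store:
        index_cache:
          backend: inmemory,memcached
          inmemory:
            max_size_bytes: 1073741824
          memcached:
            addresses: dns+memcached-index.cortex.svc.cluster.local:11211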
+ "index_header_lazy_loading_enabled": {
+ "default": false,
+ "description": "If enabled, the store-gateway will lazily memory-map an index-header only once required by a query.",
+ "type": "boolean",
+ "x-cli-flag": "blocks-storage.bucket-store.index-header-lazy-loading-enabled"
+ },
+ "index_header_lazy_loading_idle_timeout": {
+ "default": "20m0s",
+ "description": "If index-header lazy loading is enabled and this setting is \u003e 0, the store-gateway will release memory-mapped index-headers after 'idle timeout' inactivity.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.index-header-lazy-loading-idle-timeout",
+ "x-format": "duration"
+ },
+ "lazy_expanded_posting_group_max_key_series_ratio": {
+ "default": 100,
+ "description": "Mark a posting group as lazy if it fetches more keys than R * max series the query should fetch. With R set to 100, a posting group which fetches 100K keys will be marked as lazy if the current query only fetches 1000 series. This config is only valid if lazy expanded posting is enabled. 0 disables the limit.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.lazy-expanded-posting-group-max-key-series-ratio"
+ },
+ "lazy_expanded_postings_enabled": {
+ "default": false,
+ "description": "If true, the Store Gateway will estimate postings size and try to lazily expand postings if it downloads less data than expanding all postings.",
+ "type": "boolean",
+ "x-cli-flag": "blocks-storage.bucket-store.lazy-expanded-postings-enabled"
+ },
+ "matchers_cache_max_items": {
+ "default": 0,
+ "description": "Maximum number of entries in the regex matchers cache. 0 to disable.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.matchers-cache-max-items"
+ },
+ "max_chunk_pool_bytes": {
+ "default": 2147483648,
+ "description": "Max size - in bytes - of the chunks pool, used to reduce memory allocations. The pool is shared across all tenants. 0 to disable the limit.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.max-chunk-pool-bytes"
+ },
+ "max_concurrent": {
+ "default": 100,
+ "description": "Max number of concurrent queries to execute against the long-term storage. The limit is shared across all tenants.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.max-concurrent"
+ },
+ "max_inflight_requests": {
+ "default": 0,
+ "description": "Max number of inflight queries to execute against the long-term storage. The limit is shared across all tenants. 0 to disable.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.max-inflight-requests"
+ },
+ "meta_sync_concurrency": {
+ "default": 20,
+ "description": "Number of goroutines to use when syncing block meta files from object storage per tenant.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.meta-sync-concurrency"
+ },
+ "metadata_cache": {
+ "properties": {
+ "backend": {
+ "description": "The metadata cache backend type. A single cache backend or multiple cache backends can be provided. Supported values in a single cache: memcached, redis, inmemory, and '' (disable). Supported values in a multi-level cache: a comma-separated list of (inmemory, memcached, redis).",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.backend"
+ },
+ "block_index_attributes_ttl": {
+ "default": "168h0m0s",
+ "description": "How long to cache attributes of the block index.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.block-index-attributes-ttl",
+ "x-format": "duration"
+ },
+ "bucket_index_content_ttl": {
+ "default": "5m0s",
+ "description": "How long to cache content of the bucket index.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.bucket-index-content-ttl",
+ "x-format": "duration"
+ },
+ "bucket_index_max_size_bytes": {
+ "default": 1048576,
+ "description": "Maximum size of bucket index content to cache in bytes. Caching will be skipped if the content exceeds this size. This is useful to avoid a network round trip for large content if the configured caching backend has a hard limit on cached item size (in this case, you should set this limit to the same limit in the caching backend).",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.bucket-index-max-size-bytes"
+ },
+ "chunks_list_ttl": {
+ "default": "24h0m0s",
+ "description": "How long to cache the list of chunks for a block.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.chunks-list-ttl",
+ "x-format": "duration"
+ },
+ "inmemory": {
+ "properties": {
+ "max_size_bytes": {
+ "default": 1073741824,
+ "description": "Maximum size in bytes of in-memory metadata cache used (shared between all tenants).",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.inmemory.max-size-bytes"
+ }
+ },
+ "type": "object"
+ },
+ "memcached": {
+ "properties": {
+ "addresses": {
+ "description": "Comma-separated list of memcached addresses. Supported prefixes are: dns+ (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query), dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after that).",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.addresses"
+ },
+ "auto_discovery": {
+ "default": false,
+ "description": "Use the memcached auto-discovery mechanism provided by some cloud providers, like GCP and AWS.",
+ "type": "boolean",
+ "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.auto-discovery"
+ },
+ "max_async_buffer_size": {
+ "default": 10000,
+ "description": "The maximum number of enqueued asynchronous operations allowed.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.max-async-buffer-size"
+ },
+ "max_async_concurrency": {
+ "default": 3,
+ "description": "The maximum number of concurrent asynchronous operations that can occur.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency"
+ },
+ "max_get_multi_batch_size": {
+ "default": 0,
+ "description": "The maximum number of keys a single underlying get operation should fetch. If more keys are specified, internally keys are split into multiple batches and fetched concurrently, honoring the max concurrency. If set to 0, the max batch size is unlimited.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.max-get-multi-batch-size"
+ },
+ "max_get_multi_concurrency": {
+ "default": 100,
+ "description": "The maximum number of concurrent connections running get operations. If set to 0, concurrency is unlimited.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.max-get-multi-concurrency"
+ },
+ "max_idle_connections": {
+ "default": 16,
+ "description": "The maximum number of idle connections that will be maintained per address.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.max-idle-connections"
+ },
+ "max_item_size": {
+ "default": 1048576,
+ "description": "The maximum size of an item stored in memcached. Bigger items are not stored. If set to 0, no maximum size is enforced.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.max-item-size"
+ },
+ "set_async_circuit_breaker_config": {
+ "properties": {
+ "consecutive_failures": {
+ "default": 5,
+ "description": "Consecutive failures to determine if the circuit breaker should open.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.set-async.circuit-breaker.consecutive-failures"
+ },
+ "enabled": {
+ "default": false,
+ "description": "If true, enable circuit breaker.",
+ "type": "boolean",
+ "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.set-async.circuit-breaker.enabled"
+ },
+ "failure_percent": {
+ "default": 0.05,
+ "description": "Failure percentage to determine if the circuit breaker should open.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.set-async.circuit-breaker.failure-percent"
+ },
+ "half_open_max_requests": {
+ "default": 10,
+ "description": "Maximum number of requests allowed to pass through when the circuit breaker is half-open. If set to 0, 1 request is allowed by default.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.set-async.circuit-breaker.half-open-max-requests"
+ },
+ "min_requests": {
+ "default": 50,
+ "description": "Minimum number of requests needed to trigger the circuit breaker.",
+ "type": "number",
+ "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.set-async.circuit-breaker.min-requests"
+ },
+ "open_duration": {
+ "default": "5s",
+ "description": "Period of the open state after which the state of the circuit breaker becomes half-open. If set to 0, the open duration defaults to 60 seconds.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.set-async.circuit-breaker.open-duration",
+ "x-format": "duration"
+ }
+ },
+ "type": "object"
+ },
+ "timeout": {
+ "default": "100ms",
+ "description": "The socket read/write timeout.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.timeout",
+ "x-format": "duration"
+ }
+ },
+ "type": "object"
+ },
+ "metafile_attributes_ttl": {
+ "default": "168h0m0s",
+ "description": "How long to cache attributes of the block metafile.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.metafile-attributes-ttl",
+ "x-format": "duration"
+ },
+ "metafile_content_ttl": {
+ "default": "24h0m0s",
+ "description": "How long to cache content of the metafile.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.metafile-content-ttl",
+ "x-format": "duration"
+ },
+ "metafile_doesnt_exist_ttl": {
+ "default": "5m0s",
+ "description": "How long to cache information that a block metafile doesn't exist. Also used for the user deletion mark file.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.metafile-doesnt-exist-ttl",
+ "x-format": "duration"
+ },
+ "metafile_exists_ttl": {
+ "default": "2h0m0s",
+ "description": "How long to cache information that a block metafile exists. Also used for the user deletion mark file.",
+ "type": "string",
+ "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.metafile-exists-ttl",
+ "x-format": "duration"
+ },
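A matching sketch for the metadata cache, combining the TTL knobs above with a memcached backend (the service name and TTL values are illustrative assumptions, not recommendations):

    blocks_storage:
      bucket_store:
        metadata_cache:
          backend: memcached
          memcached:
            addresses: dns+memcached-metadata.cortex.svc.cluster.local:11211
          bucket_index_content_ttl: 5m
          metafile_doesnt_exist_ttl: 5m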
This is useful to avoid a network round trip for large content if the configured caching backend has a hard limit on cached item size (in this case, you should set this limit to the same limit in the caching backend).", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.metafile-max-size-bytes" + }, + "multilevel": { + "properties": { + "max_async_buffer_size": { + "default": 10000, + "description": "The maximum number of enqueued asynchronous operations allowed when backfilling cache items.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.multilevel.max-async-buffer-size" + }, + "max_async_concurrency": { + "default": 3, + "description": "The maximum number of concurrent asynchronous operations that can occur when backfilling cache items.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.multilevel.max-async-concurrency" + }, + "max_backfill_items": { + "default": 10000, + "description": "The maximum number of items to backfill per asynchronous operation.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.multilevel.max-backfill-items" + } + }, + "type": "object" + }, + "partitioned_groups_list_ttl": { + "default": "0s", + "description": "How long to cache list of partitioned groups for a user. 0 disables caching", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.partitioned-groups-list-ttl", + "x-format": "duration" + }, + "redis": { + "properties": { + "addresses": { + "description": "Comma separated list of redis addresses. Supported prefixes are: dns+ (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query), dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after that).", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.addresses" + }, + "cache_size": { + "default": 0, + "description": "If not zero then client-side caching is enabled. Client-side caching is when data is stored in memory instead of fetching data each time. See https://redis.io/docs/manual/client-side-caching/ for more info.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.cache-size" + }, + "db": { + "default": 0, + "description": "Database to be selected after connecting to the server.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.db" + }, + "dial_timeout": { + "default": "5s", + "description": "Client dial timeout.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.dial-timeout", + "x-format": "duration" + }, + "get_multi_batch_size": { + "default": 100, + "description": "The maximum size per batch for mget.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.get-multi-batch-size" + }, + "master_name": { + "description": "Specifies the master's name.
Must not be empty for Redis Sentinel.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.master-name" + }, + "max_async_buffer_size": { + "default": 10000, + "description": "The maximum number of enqueued asynchronous operations allowed.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.max-async-buffer-size" + }, + "max_async_concurrency": { + "default": 3, + "description": "The maximum number of concurrent asynchronous operations that can occur.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.max-async-concurrency" + }, + "max_get_multi_concurrency": { + "default": 100, + "description": "The maximum number of concurrent GetMulti() operations. If set to 0, concurrency is unlimited.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.max-get-multi-concurrency" + }, + "max_set_multi_concurrency": { + "default": 100, + "description": "The maximum number of concurrent SetMulti() operations. If set to 0, concurrency is unlimited.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.max-set-multi-concurrency" + }, + "password": { + "description": "Redis password.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.password" + }, + "read_timeout": { + "default": "3s", + "description": "Client read timeout.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.read-timeout", + "x-format": "duration" + }, + "set_async_circuit_breaker_config": { + "properties": { + "consecutive_failures": { + "default": 5, + "description": "Consecutive failures to determine if the circuit breaker should open.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.set-async.circuit-breaker.consecutive-failures" + }, + "enabled": { + "default": false, + "description": "If true, enable the circuit breaker.", + "type": "boolean", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.set-async.circuit-breaker.enabled" + }, + "failure_percent": { + "default": 0.05, + "description": "Failure percentage to determine if the circuit breaker should open.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.set-async.circuit-breaker.failure-percent" + }, + "half_open_max_requests": { + "default": 10, + "description": "Maximum number of requests allowed to pass through when the circuit breaker is half-open. If set to 0, by default it allows 1 request.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.set-async.circuit-breaker.half-open-max-requests" + }, + "min_requests": { + "default": 50, + "description": "Minimum number of requests needed to trigger the circuit breaker.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.set-async.circuit-breaker.min-requests" + }, + "open_duration": { + "default": "5s", + "description": "Period of the open state after which the state of the circuit breaker becomes half-open.
If set to 0, the open duration defaults to 60 seconds.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.set-async.circuit-breaker.open-duration", + "x-format": "duration" + } + }, + "type": "object" + }, + "set_multi_batch_size": { + "default": 100, + "description": "The maximum size per batch for pipeline set.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.set-multi-batch-size" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.tls-cert-path" + }, + "tls_enabled": { + "default": false, + "description": "Whether to enable tls for redis connection.", + "type": "boolean", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.tls-server-name" + }, + "username": { + "description": "Redis username.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.username" + }, + "write_timeout": { + "default": "3s", + "description": "Client write timeout.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.write-timeout", + "x-format": "duration" + } + }, + "type": "object" + }, + "tenant_blocks_list_ttl": { + "default": "5m0s", + "description": "How long to cache list of blocks for each tenant.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.tenant-blocks-list-ttl", + "x-format": "duration" + }, + "tenants_list_ttl": { + "default": "15m0s", + "description": "How long to cache list of tenants in the bucket.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.tenants-list-ttl", + "x-format": "duration" + } + }, + "type": "object" + }, + "parquet_labels_cache": { + "properties": { + "attributes_ttl": { + "default": "168h0m0s", + "description": "TTL for caching object attributes for parquet labels file.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.attributes-ttl", + "x-format": "duration" + }, + "backend": { + "description": "The parquet labels cache backend type. A single cache backend or multiple cache backends can be provided. Supported values in single cache: memcached, redis, inmemory, and '' (disable).
Supported values in multi-level cache: a comma-separated list of (inmemory, memcached, redis)", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.backend" + }, + "inmemory": { + "properties": { + "max_size_bytes": { + "default": 1073741824, + "description": "Maximum size in bytes of in-memory parquet-labels cache used (shared between all tenants).", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.inmemory.max-size-bytes" + } + }, + "type": "object" + }, + "max_get_range_requests": { + "default": 3, + "description": "Maximum number of sub-GetRange requests that a single GetRange request can be split into when fetching the parquet labels file. Zero or negative value = unlimited number of sub-requests.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.max-get-range-requests" + }, + "memcached": { + "properties": { + "addresses": { + "description": "Comma separated list of memcached addresses. Supported prefixes are: dns+ (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query), dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after that).", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.memcached.addresses" + }, + "auto_discovery": { + "default": false, + "description": "Use memcached auto-discovery mechanism provided by some cloud providers like GCP and AWS", + "type": "boolean", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.memcached.auto-discovery" + }, + "max_async_buffer_size": { + "default": 10000, + "description": "The maximum number of enqueued asynchronous operations allowed.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.memcached.max-async-buffer-size" + }, + "max_async_concurrency": { + "default": 3, + "description": "The maximum number of concurrent asynchronous operations that can occur.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.memcached.max-async-concurrency" + }, + "max_get_multi_batch_size": { + "default": 0, + "description": "The maximum number of keys a single underlying get operation should fetch. If more keys are specified, internally keys are split into multiple batches and fetched concurrently, honoring the max concurrency. If set to 0, the max batch size is unlimited.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.memcached.max-get-multi-batch-size" + }, + "max_get_multi_concurrency": { + "default": 100, + "description": "The maximum number of concurrent connections running get operations. If set to 0, concurrency is unlimited.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.memcached.max-get-multi-concurrency" + }, + "max_idle_connections": { + "default": 16, + "description": "The maximum number of idle connections that will be maintained per address.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.memcached.max-idle-connections" + }, + "max_item_size": { + "default": 1048576, + "description": "The maximum size of an item stored in memcached. Bigger items are not stored.
If set to 0, no maximum size is enforced.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.memcached.max-item-size" + }, + "set_async_circuit_breaker_config": { + "properties": { + "consecutive_failures": { + "default": 5, + "description": "Consecutive failures to determine if the circuit breaker should open.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.consecutive-failures" + }, + "enabled": { + "default": false, + "description": "If true, enable the circuit breaker.", + "type": "boolean", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.enabled" + }, + "failure_percent": { + "default": 0.05, + "description": "Failure percentage to determine if the circuit breaker should open.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.failure-percent" + }, + "half_open_max_requests": { + "default": 10, + "description": "Maximum number of requests allowed to pass through when the circuit breaker is half-open. If set to 0, by default it allows 1 request.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.half-open-max-requests" + }, + "min_requests": { + "default": 50, + "description": "Minimum number of requests needed to trigger the circuit breaker.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.min-requests" + }, + "open_duration": { + "default": "5s", + "description": "Period of the open state after which the state of the circuit breaker becomes half-open. If set to 0, the open duration defaults to 60 seconds.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.open-duration", + "x-format": "duration" + } + }, + "type": "object" + }, + "timeout": { + "default": "100ms", + "description": "The socket read/write timeout.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.memcached.timeout", + "x-format": "duration" + } + }, + "type": "object" + }, + "multilevel": { + "properties": { + "max_async_buffer_size": { + "default": 10000, + "description": "The maximum number of enqueued asynchronous operations allowed when backfilling cache items.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.multilevel.max-async-buffer-size" + }, + "max_async_concurrency": { + "default": 3, + "description": "The maximum number of concurrent asynchronous operations that can occur when backfilling cache items.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.multilevel.max-async-concurrency" + }, + "max_backfill_items": { + "default": 10000, + "description": "The maximum number of items to backfill per asynchronous operation.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.multilevel.max-backfill-items" + } + }, + "type": "object" + }, + "redis": { + "properties": { + "addresses": { + "description": "Comma separated list of redis addresses.
Supported prefixes are: dns+ (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query), dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after that).", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.redis.addresses" + }, + "cache_size": { + "default": 0, + "description": "If not zero then client-side caching is enabled. Client-side caching is when data is stored in memory instead of fetching data each time. See https://redis.io/docs/manual/client-side-caching/ for more info.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.redis.cache-size" + }, + "db": { + "default": 0, + "description": "Database to be selected after connecting to the server.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.redis.db" + }, + "dial_timeout": { + "default": "5s", + "description": "Client dial timeout.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.redis.dial-timeout", + "x-format": "duration" + }, + "get_multi_batch_size": { + "default": 100, + "description": "The maximum size per batch for mget.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.redis.get-multi-batch-size" + }, + "master_name": { + "description": "Specifies the master's name. Must not be empty for Redis Sentinel.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.redis.master-name" + }, + "max_async_buffer_size": { + "default": 10000, + "description": "The maximum number of enqueued asynchronous operations allowed.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.redis.max-async-buffer-size" + }, + "max_async_concurrency": { + "default": 3, + "description": "The maximum number of concurrent asynchronous operations that can occur.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.redis.max-async-concurrency" + }, + "max_get_multi_concurrency": { + "default": 100, + "description": "The maximum number of concurrent GetMulti() operations. If set to 0, concurrency is unlimited.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.redis.max-get-multi-concurrency" + }, + "max_set_multi_concurrency": { + "default": 100, + "description": "The maximum number of concurrent SetMulti() operations.
If set to 0, concurrency is unlimited.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.redis.max-set-multi-concurrency" + }, + "password": { + "description": "Redis password.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.redis.password" + }, + "read_timeout": { + "default": "3s", + "description": "Client read timeout.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.redis.read-timeout", + "x-format": "duration" + }, + "set_async_circuit_breaker_config": { + "properties": { + "consecutive_failures": { + "default": 5, + "description": "Consecutive failures to determine if the circuit breaker should open.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.consecutive-failures" + }, + "enabled": { + "default": false, + "description": "If true, enable the circuit breaker.", + "type": "boolean", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.enabled" + }, + "failure_percent": { + "default": 0.05, + "description": "Failure percentage to determine if the circuit breaker should open.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.failure-percent" + }, + "half_open_max_requests": { + "default": 10, + "description": "Maximum number of requests allowed to pass through when the circuit breaker is half-open. If set to 0, by default it allows 1 request.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.half-open-max-requests" + }, + "min_requests": { + "default": 50, + "description": "Minimum number of requests needed to trigger the circuit breaker.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.min-requests" + }, + "open_duration": { + "default": "5s", + "description": "Period of the open state after which the state of the circuit breaker becomes half-open. If set to 0, the open duration defaults to 60 seconds.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.open-duration", + "x-format": "duration" + } + }, + "type": "object" + }, + "set_multi_batch_size": { + "default": 100, + "description": "The maximum size per batch for pipeline set.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.redis.set-multi-batch-size" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.redis.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server.
Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.redis.tls-cert-path" + }, + "tls_enabled": { + "default": false, + "description": "Whether to enable tls for redis connection.", + "type": "boolean", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.redis.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.redis.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.redis.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.redis.tls-server-name" + }, + "username": { + "description": "Redis username.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.redis.username" + }, + "write_timeout": { + "default": "3s", + "description": "Client write timeout.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.redis.write-timeout", + "x-format": "duration" + } + }, + "type": "object" + }, + "subrange_size": { + "default": 16000, + "description": "Size of each subrange that bucket object is split into for better caching.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.subrange-size" + }, + "subrange_ttl": { + "default": "24h0m0s", + "description": "TTL for caching individual subranges.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.parquet-labels-cache.subrange-ttl", + "x-format": "duration" + } + }, + "type": "object" + }, + "series_batch_size": { + "default": 10000, + "description": "Controls how many series to fetch per batch in Store Gateway. Default value is 10000.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.series-batch-size" + }, + "sync_dir": { + "default": "tsdb-sync", + "description": "Directory to store synchronized TSDB index headers.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.sync-dir" + }, + "sync_interval": { + "default": "15m0s", + "description": "How frequently to scan the bucket, or to refresh the bucket index (if enabled), in order to look for changes (new blocks shipped by ingesters and blocks deleted by retention or compaction).", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.sync-interval", + "x-format": "duration" + }, + "tenant_sync_concurrency": { + "default": 10, + "description": "Maximum number of concurrent tenants syncing blocks.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.tenant-sync-concurrency" + }, + "token_bucket_bytes_limiter": { + "properties": { + "instance_token_bucket_size": { + "default": 859832320, + "description": "Instance token bucket size", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.token-bucket-bytes-limiter.instance-token-bucket-size" + }, + "mode": { + "default": "disabled", + "description": "Token bucket bytes limiter mode. 
Supported values are: disabled, dryrun, enabled", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.token-bucket-bytes-limiter.mode" + }, + "request_token_bucket_size": { + "default": 4194304, + "description": "Request token bucket size", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.token-bucket-bytes-limiter.request-token-bucket-size" + }, + "user_token_bucket_size": { + "default": 644874240, + "description": "User token bucket size", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.token-bucket-bytes-limiter.user-token-bucket-size" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "filesystem": { + "properties": { + "dir": { + "description": "Local filesystem storage directory.", + "type": "string", + "x-cli-flag": "blocks-storage.filesystem.dir" + } + }, + "type": "object" + }, + "gcs": { + "properties": { + "bucket_name": { + "description": "GCS bucket name", + "type": "string", + "x-cli-flag": "blocks-storage.gcs.bucket-name" + }, + "service_account": { + "description": "JSON representing either a Google Developers Console client_credentials.json file or a Google Developers service account key file. If empty, fall back to Google default logic.", + "type": "string", + "x-cli-flag": "blocks-storage.gcs.service-account" + } + }, + "type": "object" + }, + "s3": { + "properties": { + "access_key_id": { + "description": "S3 access key ID", + "type": "string", + "x-cli-flag": "blocks-storage.s3.access-key-id" + }, + "bucket_lookup_type": { + "default": "auto", + "description": "The S3 bucket lookup style. Supported values are: auto, virtual-hosted, path.", + "type": "string", + "x-cli-flag": "blocks-storage.s3.bucket-lookup-type" + }, + "bucket_name": { + "description": "S3 bucket name", + "type": "string", + "x-cli-flag": "blocks-storage.s3.bucket-name" + }, + "disable_dualstack": { + "default": false, + "description": "If enabled, S3 endpoint will use the non-dualstack variant.", + "type": "boolean", + "x-cli-flag": "blocks-storage.s3.disable-dualstack" + }, + "endpoint": { + "description": "The S3 bucket endpoint. It could be an AWS S3 endpoint listed at https://docs.aws.amazon.com/general/latest/gr/s3.html or the address of an S3-compatible service in hostname:port format.", + "type": "string", + "x-cli-flag": "blocks-storage.s3.endpoint" + }, + "http": { + "properties": { + "expect_continue_timeout": { + "default": "1s", + "description": "The time to wait for a server's first response headers after fully writing the request headers if the request has an Expect header. 0 to send the request body immediately.", + "type": "string", + "x-cli-flag": "blocks-storage.s3.expect-continue-timeout", + "x-format": "duration" + }, + "idle_conn_timeout": { + "default": "1m30s", + "description": "The time an idle connection will remain idle before closing.", + "type": "string", + "x-cli-flag": "blocks-storage.s3.http.idle-conn-timeout", + "x-format": "duration" + }, + "insecure_skip_verify": { + "default": false, + "description": "If the client connects via HTTPS and this option is enabled, the client will accept any certificate and hostname.", + "type": "boolean", + "x-cli-flag": "blocks-storage.s3.http.insecure-skip-verify" + }, + "max_connections_per_host": { + "default": 0, + "description": "Maximum number of connections per host.
0 means no limit.", + "type": "number", + "x-cli-flag": "blocks-storage.s3.max-connections-per-host" + }, + "max_idle_connections": { + "default": 100, + "description": "Maximum number of idle (keep-alive) connections across all hosts. 0 means no limit.", + "type": "number", + "x-cli-flag": "blocks-storage.s3.max-idle-connections" + }, + "max_idle_connections_per_host": { + "default": 100, + "description": "Maximum number of idle (keep-alive) connections to keep per-host. If 0, a built-in default value is used.", + "type": "number", + "x-cli-flag": "blocks-storage.s3.max-idle-connections-per-host" + }, + "response_header_timeout": { + "default": "2m0s", + "description": "The amount of time the client will wait for a server's response headers.", + "type": "string", + "x-cli-flag": "blocks-storage.s3.http.response-header-timeout", + "x-format": "duration" + }, + "tls_handshake_timeout": { + "default": "10s", + "description": "Maximum time to wait for a TLS handshake. 0 means no limit.", + "type": "string", + "x-cli-flag": "blocks-storage.s3.tls-handshake-timeout", + "x-format": "duration" + } + }, + "type": "object" + }, + "insecure": { + "default": false, + "description": "If enabled, use http:// for the S3 endpoint instead of https://. This could be useful in local dev/test environments while using an S3-compatible backend storage, like Minio.", + "type": "boolean", + "x-cli-flag": "blocks-storage.s3.insecure" + }, + "list_objects_version": { + "description": "The list api version. Supported values are: v1, v2, and ''.", + "type": "string", + "x-cli-flag": "blocks-storage.s3.list-objects-version" + }, + "region": { + "description": "S3 region. If unset, the client will issue an S3 GetBucketLocation API call to autodetect it.", + "type": "string", + "x-cli-flag": "blocks-storage.s3.region" + }, + "secret_access_key": { + "description": "S3 secret access key", + "type": "string", + "x-cli-flag": "blocks-storage.s3.secret-access-key" + }, + "send_content_md5": { + "default": true, + "description": "If true, attach an MD5 checksum when uploading objects and S3 uses the MD5 checksum algorithm to verify the provided digest. If false, use the CRC32C algorithm instead.", + "type": "boolean", + "x-cli-flag": "blocks-storage.s3.send-content-md5" + }, + "signature_version": { + "default": "v4", + "description": "The signature version to use for authenticating against S3. Supported values are: v4, v2.", + "type": "string", + "x-cli-flag": "blocks-storage.s3.signature-version" + }, + "sse": { + "$ref": "#/definitions/s3_sse_config" + } + }, + "type": "object" + }, + "swift": { + "properties": { + "application_credential_id": { + "description": "OpenStack Swift application credential ID.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.application-credential-id" + }, + "application_credential_name": { + "description": "OpenStack Swift application credential name.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.application-credential-name" + }, + "application_credential_secret": { + "description": "OpenStack Swift application credential secret.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.application-credential-secret" + }, + "auth_url": { + "description": "OpenStack Swift authentication URL", + "type": "string", + "x-cli-flag": "blocks-storage.swift.auth-url" + }, + "auth_version": { + "default": 0, + "description": "OpenStack Swift authentication API version.
0 to autodetect.", + "type": "number", + "x-cli-flag": "blocks-storage.swift.auth-version" + }, + "connect_timeout": { + "default": "10s", + "description": "Time after which a connection attempt is aborted.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.connect-timeout", + "x-format": "duration" + }, + "container_name": { + "description": "Name of the OpenStack Swift container to put chunks in.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.container-name" + }, + "domain_id": { + "description": "OpenStack Swift user's domain ID.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.domain-id" + }, + "domain_name": { + "description": "OpenStack Swift user's domain name.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.domain-name" + }, + "max_retries": { + "default": 3, + "description": "Max retries on request errors.", + "type": "number", + "x-cli-flag": "blocks-storage.swift.max-retries" + }, + "password": { + "description": "OpenStack Swift API key.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.password" + }, + "project_domain_id": { + "description": "ID of the OpenStack Swift project's domain (v3 auth only), only needed if it differs from the user domain.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.project-domain-id" + }, + "project_domain_name": { + "description": "Name of the OpenStack Swift project's domain (v3 auth only), only needed if it differs from the user domain.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.project-domain-name" + }, + "project_id": { + "description": "OpenStack Swift project ID (v2,v3 auth only).", + "type": "string", + "x-cli-flag": "blocks-storage.swift.project-id" + }, + "project_name": { + "description": "OpenStack Swift project name (v2,v3 auth only).", + "type": "string", + "x-cli-flag": "blocks-storage.swift.project-name" + }, + "region_name": { + "description": "OpenStack Swift Region to use (v2,v3 auth only).", + "type": "string", + "x-cli-flag": "blocks-storage.swift.region-name" + }, + "request_timeout": { + "default": "5s", + "description": "Time after which an idle request is aborted. The timeout watchdog is reset each time some data is received, so the timeout only triggers when no data has been received on a request for this duration.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.request-timeout", + "x-format": "duration" + }, + "user_domain_id": { + "description": "OpenStack Swift user's domain ID.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.user-domain-id" + }, + "user_domain_name": { + "description": "OpenStack Swift user's domain name.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.user-domain-name" + }, + "user_id": { + "description": "OpenStack Swift user ID.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.user-id" + }, + "username": { + "description": "OpenStack Swift username.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.username" + } + }, + "type": "object" + }, + "tsdb": { + "properties": { + "block_ranges_period": { + "default": "2h0m0s", + "description": "TSDB blocks range period.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "blocks-storage.tsdb.block-ranges-period" + }, + "close_idle_tsdb_timeout": { + "default": "0s", + "description": "If TSDB has not received any data for this duration, and all blocks from TSDB have been shipped, TSDB is closed and deleted from local disk.
If set to a positive value, this value should be equal to or higher than the -querier.query-ingesters-within flag to make sure that TSDB is not closed prematurely, which could cause partial query results. 0 or negative value disables closing of idle TSDB.", + "type": "string", + "x-cli-flag": "blocks-storage.tsdb.close-idle-tsdb-timeout", + "x-format": "duration" + }, + "dir": { + "default": "tsdb", + "description": "Local directory to store TSDBs in the ingesters.", + "type": "string", + "x-cli-flag": "blocks-storage.tsdb.dir" + }, + "expanded_postings_cache": { + "description": "[EXPERIMENTAL] If enabled, ingesters will cache expanded postings when querying blocks. Caching can be configured separately for the head and compacted blocks.", + "properties": { + "blocks": { + "description": "If enabled, ingesters will cache expanded postings for the compacted blocks. The cache is shared between all blocks.", + "properties": { + "enabled": { + "default": false, + "description": "Whether the postings cache is enabled or not", + "type": "boolean", + "x-cli-flag": "blocks-storage.expanded_postings_cache.block.enabled" + }, + "max_bytes": { + "default": 10485760, + "description": "Max bytes for postings cache", + "type": "number", + "x-cli-flag": "blocks-storage.expanded_postings_cache.block.max-bytes" + }, + "ttl": { + "default": "10m0s", + "description": "TTL for postings cache", + "type": "string", + "x-cli-flag": "blocks-storage.expanded_postings_cache.block.ttl", + "x-format": "duration" + } + }, + "type": "object" + }, + "head": { + "description": "If enabled, ingesters will cache expanded postings for the head block. Only queries with an equal matcher for metric __name__ are cached.", + "properties": { + "enabled": { + "default": false, + "description": "Whether the postings cache is enabled or not", + "type": "boolean", + "x-cli-flag": "blocks-storage.expanded_postings_cache.head.enabled" + }, + "max_bytes": { + "default": 10485760, + "description": "Max bytes for postings cache", + "type": "number", + "x-cli-flag": "blocks-storage.expanded_postings_cache.head.max-bytes" + }, + "ttl": { + "default": "10m0s", + "description": "TTL for postings cache", + "type": "string", + "x-cli-flag": "blocks-storage.expanded_postings_cache.head.ttl", + "x-format": "duration" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "flush_blocks_on_shutdown": { + "default": false, + "description": "True to flush blocks to storage on shutdown. If false, incomplete blocks will be reused after restart.", + "type": "boolean", + "x-cli-flag": "blocks-storage.tsdb.flush-blocks-on-shutdown" + }, + "head_chunks_write_buffer_size_bytes": { + "default": 4194304, + "description": "The write buffer size used by the head chunks mapper.
Lower values reduce memory utilisation on clusters with a large number of tenants at the cost of increased disk I/O operations.", + "type": "number", + "x-cli-flag": "blocks-storage.tsdb.head-chunks-write-buffer-size-bytes" + }, + "head_chunks_write_queue_size": { + "default": 0, + "description": "The size of the in-memory queue used before flushing chunks to the disk.", + "type": "number", + "x-cli-flag": "blocks-storage.tsdb.head-chunks-write-queue-size" + }, + "head_compaction_concurrency": { + "default": 5, + "description": "Maximum number of tenants concurrently compacting TSDB head into a new block", + "type": "number", + "x-cli-flag": "blocks-storage.tsdb.head-compaction-concurrency" + }, + "head_compaction_idle_timeout": { + "default": "1h0m0s", + "description": "If TSDB head is idle for this duration, it is compacted. Note that up to 25% jitter is added to the value to avoid ingesters compacting concurrently. 0 means disabled.", + "type": "string", + "x-cli-flag": "blocks-storage.tsdb.head-compaction-idle-timeout", + "x-format": "duration" + }, + "head_compaction_interval": { + "default": "1m0s", + "description": "How frequently Cortex tries to compact the TSDB head. A block is only created if data covers the smallest block range. Must be greater than 0 and max 30 minutes. Note that up to 50% jitter is added to the value for the first compaction to avoid ingesters compacting concurrently.", + "type": "string", + "x-cli-flag": "blocks-storage.tsdb.head-compaction-interval", + "x-format": "duration" + }, + "max_exemplars": { + "default": 0, + "description": "Deprecated, use maxExemplars in limits instead. If the MaxExemplars value in limits is set to zero, Cortex will fall back on this value. This setting enables support for exemplars in TSDB and sets the maximum number that will be stored. 0 or less means disabled.", + "type": "number", + "x-cli-flag": "blocks-storage.tsdb.max-exemplars" + }, + "max_tsdb_opening_concurrency_on_startup": { + "default": 10, + "description": "Limit the number of concurrently opening TSDBs on startup.", + "type": "number", + "x-cli-flag": "blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup" + }, + "memory_snapshot_on_shutdown": { + "default": false, + "description": "True to enable snapshotting of in-memory TSDB data on disk when shutting down.", + "type": "boolean", + "x-cli-flag": "blocks-storage.tsdb.memory-snapshot-on-shutdown" + }, + "out_of_order_cap_max": { + "default": 32, + "description": "[EXPERIMENTAL] Configures the maximum number of samples per chunk that can be out-of-order.", + "type": "number", + "x-cli-flag": "blocks-storage.tsdb.out-of-order-cap-max" + }, + "retention_period": { + "default": "6h0m0s", + "description": "TSDB blocks retention in the ingester before a block is removed. This should be larger than the block_ranges_period and large enough to give store-gateways and queriers enough time to discover newly uploaded blocks.", + "type": "string", + "x-cli-flag": "blocks-storage.tsdb.retention-period", + "x-format": "duration" + }, + "ship_concurrency": { + "default": 10, + "description": "Maximum number of tenants concurrently shipping blocks to the storage.", + "type": "number", + "x-cli-flag": "blocks-storage.tsdb.ship-concurrency" + }, + "ship_interval": { + "default": "1m0s", + "description": "How frequently the TSDB blocks are scanned and new ones are shipped to the storage.
0 means shipping is disabled.", + "type": "string", + "x-cli-flag": "blocks-storage.tsdb.ship-interval", + "x-format": "duration" + }, + "stripe_size": { + "default": 16384, + "description": "The number of shards of series to use in TSDB (must be a power of 2). Reducing this will decrease memory footprint, but can negatively impact performance.", + "type": "number", + "x-cli-flag": "blocks-storage.tsdb.stripe-size" + }, + "wal_compression_type": { + "description": "TSDB WAL compression type. Supported values are: 'snappy', 'zstd' and '' (disable compression)", + "type": "string", + "x-cli-flag": "blocks-storage.tsdb.wal-compression-type" + }, + "wal_segment_size_bytes": { + "default": 134217728, + "description": "TSDB WAL segment files max size (bytes).", + "type": "number", + "x-cli-flag": "blocks-storage.tsdb.wal-segment-size-bytes" + } + }, + "type": "object" + }, + "users_scanner": { + "properties": { + "cache_ttl": { + "default": "0s", + "description": "TTL of the cached users. 0 disables caching and relies on caching at bucket client level.", + "type": "string", + "x-cli-flag": "blocks-storage.users-scanner.cache-ttl", + "x-format": "duration" + }, + "max_stale_period": { + "default": "1h0m0s", + "description": "Maximum period of time to consider the user index as stale. Fall back to the base scanner if stale. Only valid when strategy is user_index.", + "type": "string", + "x-cli-flag": "blocks-storage.users-scanner.user-index.max-stale-period", + "x-format": "duration" + }, + "strategy": { + "default": "list", + "description": "Strategy to use to scan users. Supported values are: list, user_index.", + "type": "string", + "x-cli-flag": "blocks-storage.users-scanner.strategy" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "compactor_config": { + "description": "The compactor_config configures the compactor for the blocks storage.", + "properties": { + "accept_malformed_index": { + "default": false, + "description": "When enabled, index verification will ignore out-of-order label names.", + "type": "boolean", + "x-cli-flag": "compactor.accept-malformed-index" + }, + "block_deletion_marks_migration_enabled": { + "default": false, + "description": "When enabled, at compactor startup the bucket will be scanned and all found deletion marks inside the block location will be copied to the markers global location too.
This option can (and should) be safely disabled as soon as the compactor has successfully run at least once.", + "type": "boolean", + "x-cli-flag": "compactor.block-deletion-marks-migration-enabled" + }, + "block_files_concurrency": { + "default": 10, + "description": "Number of goroutines to use when fetching/uploading block files from object storage.", + "type": "number", + "x-cli-flag": "compactor.block-files-concurrency" + }, + "block_ranges": { + "default": "2h0m0s,12h0m0s,24h0m0s", + "description": "List of compaction time ranges.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "compactor.block-ranges" + }, + "block_sync_concurrency": { + "default": 20, + "description": "Number of Go routines to use when syncing block index and chunks files from the long term storage.", + "type": "number", + "x-cli-flag": "compactor.block-sync-concurrency" + }, + "blocks_fetch_concurrency": { + "default": 3, + "description": "Number of goroutines to use when fetching blocks from object storage when compacting.", + "type": "number", + "x-cli-flag": "compactor.blocks-fetch-concurrency" + }, + "caching_bucket_enabled": { + "default": false, + "description": "When enabled, the caching bucket will be used for the compactor, except the cleaner service, which serves as the source of truth for block status", + "type": "boolean", + "x-cli-flag": "compactor.caching-bucket-enabled" + }, + "cleaner_caching_bucket_enabled": { + "default": false, + "description": "When enabled, the caching bucket will be used for the cleaner", + "type": "boolean", + "x-cli-flag": "compactor.cleaner-caching-bucket-enabled" + }, + "cleaner_visit_marker_file_update_interval": { + "default": "5m0s", + "description": "How frequently the cleaner visit marker file should be updated when cleaning a user.", + "type": "string", + "x-cli-flag": "compactor.cleaner-visit-marker-file-update-interval", + "x-format": "duration" + }, + "cleaner_visit_marker_timeout": { + "default": "10m0s", + "description": "How long before the cleaner visit marker file is considered expired and can be picked up by the cleaner again. The value should be smaller than -compactor.cleanup-interval", + "type": "string", + "x-cli-flag": "compactor.cleaner-visit-marker-timeout", + "x-format": "duration" + }, + "cleanup_concurrency": { + "default": 20, + "description": "Max number of tenants for which blocks cleanup and maintenance should run concurrently.", + "type": "number", + "x-cli-flag": "compactor.cleanup-concurrency" + }, + "cleanup_interval": { + "default": "15m0s", + "description": "How frequently the compactor should run blocks cleanup and maintenance, as well as update the bucket index.", + "type": "string", + "x-cli-flag": "compactor.cleanup-interval", + "x-format": "duration" + }, + "compaction_concurrency": { + "default": 1, + "description": "Max number of concurrent compactions running.", + "type": "number", + "x-cli-flag": "compactor.compaction-concurrency" + }, + "compaction_interval": { + "default": "1h0m0s", + "description": "The frequency at which the compaction runs", + "type": "string", + "x-cli-flag": "compactor.compaction-interval", + "x-format": "duration" + }, + "compaction_retries": { + "default": 3, + "description": "How many times to retry a failed compaction within a single compaction run.", + "type": "number", + "x-cli-flag": "compactor.compaction-retries" + }, + "compaction_strategy": { + "default": "default", + "description": "The compaction strategy to use.
Supported values are: default, partitioning.", + "type": "string", + "x-cli-flag": "compactor.compaction-strategy" + }, + "compaction_visit_marker_file_update_interval": { + "default": "1m0s", + "description": "How frequently the compaction visit marker file should be updated during compaction.", + "type": "string", + "x-cli-flag": "compactor.compaction-visit-marker-file-update-interval", + "x-format": "duration" + }, + "compaction_visit_marker_timeout": { + "default": "10m0s", + "description": "How long before the compaction visit marker file is considered expired and can be picked up by the compactor again.", + "type": "string", + "x-cli-flag": "compactor.compaction-visit-marker-timeout", + "x-format": "duration" + }, + "consistency_delay": { + "default": "0s", + "description": "Minimum age of fresh (non-compacted) blocks before they are processed. Malformed blocks older than the maximum of consistency-delay and 48h0m0s will be removed.", + "type": "string", + "x-cli-flag": "compactor.consistency-delay", + "x-format": "duration" + }, + "data_dir": { + "default": "./data", + "description": "Data directory in which to cache blocks and process compactions", + "type": "string", + "x-cli-flag": "compactor.data-dir" + }, + "deletion_delay": { + "default": "12h0m0s", + "description": "Time before a block marked for deletion is deleted from bucket. If not 0, blocks will be marked for deletion and compactor component will permanently delete blocks marked for deletion from the bucket. If 0, blocks will be deleted straight away. Note that deleting blocks immediately can cause query failures.", + "type": "string", + "x-cli-flag": "compactor.deletion-delay", + "x-format": "duration" + }, + "disabled_tenants": { + "description": "Comma separated list of tenants that cannot be compacted by this compactor. If specified, and compactor would normally pick given tenant for compaction (via -compactor.enabled-tenants or sharding), it will be ignored instead.", + "type": "string", + "x-cli-flag": "compactor.disabled-tenants" + }, + "enabled_tenants": { + "description": "Comma separated list of tenants that can be compacted. If specified, only these tenants will be compacted by compactor, otherwise all tenants can be compacted. Subject to sharding.", + "type": "string", + "x-cli-flag": "compactor.enabled-tenants" + }, + "meta_sync_concurrency": { + "default": 20, + "description": "Number of Go routines to use when syncing block meta files from the long term storage.", + "type": "number", + "x-cli-flag": "compactor.meta-sync-concurrency" + }, + "sharding_enabled": { + "default": false, + "description": "Shard tenants across multiple compactor instances. Sharding is required if you run multiple compactor instances, in order to coordinate compactions and avoid race conditions leading to the same tenant blocks simultaneously compacted by different instances.", + "type": "boolean", + "x-cli-flag": "compactor.sharding-enabled" + }, + "sharding_planner_delay": { + "default": "10s", + "description": "How long the shuffle sharding planner should wait before running planning code. This delay prevents double compaction when two compactors claim the same partition in the grouper at the same time.", + "type": "string", + "x-cli-flag": "compactor.sharding-planner-delay", + "x-format": "duration" + }, + "sharding_ring": { + "properties": { + "auto_forget_delay": { + "default": "2m0s", + "description": "Time since last heartbeat before the compactor is removed from the ring.
0 to disable", + "type": "string", + "x-cli-flag": "compactor.auto-forget-delay", + "x-format": "duration" + }, + "detailed_metrics_enabled": { + "default": true, + "description": "Set to true to enable ring detailed metrics. These metrics provide detailed information, such as token count and ownership per tenant. Disabling them can significantly decrease the number of metrics emitted.", + "type": "boolean", + "x-cli-flag": "compactor.ring.detailed-metrics-enabled" + }, + "heartbeat_period": { + "default": "5s", + "description": "Period at which to heartbeat to the ring. 0 = disabled.", + "type": "string", + "x-cli-flag": "compactor.ring.heartbeat-period", + "x-format": "duration" + }, + "heartbeat_timeout": { + "default": "1m0s", + "description": "The heartbeat timeout after which compactors are considered unhealthy within the ring. 0 = never (timeout disabled).", + "type": "string", + "x-cli-flag": "compactor.ring.heartbeat-timeout", + "x-format": "duration" + }, + "instance_interface_names": { + "default": "[eth0 en0]", + "description": "Name of network interface to read address from.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "compactor.ring.instance-interface-names" + }, + "kvstore": { + "properties": { + "consul": { + "$ref": "#/definitions/consul_config" + }, + "dynamodb": { + "properties": { + "max_cas_retries": { + "default": 10, + "description": "Maximum number of retries for DDB KV CAS.", + "type": "number", + "x-cli-flag": "compactor.ring.dynamodb.max-cas-retries" + }, + "puller_sync_time": { + "default": "1m0s", + "description": "Time to refresh local ring with information on dynamodb.", + "type": "string", + "x-cli-flag": "compactor.ring.dynamodb.puller-sync-time", + "x-format": "duration" + }, + "region": { + "description": "Region to access dynamodb.", + "type": "string", + "x-cli-flag": "compactor.ring.dynamodb.region" + }, + "table_name": { + "description": "Table name to use on dynamodb.", + "type": "string", + "x-cli-flag": "compactor.ring.dynamodb.table-name" + }, + "timeout": { + "default": "2m0s", + "description": "Timeout of dynamoDbClient requests. Default is 2m.", + "type": "string", + "x-cli-flag": "compactor.ring.dynamodb.timeout", + "x-format": "duration" + }, + "ttl": { + "default": "0s", + "description": "Time to expire items on dynamodb.", + "type": "string", + "x-cli-flag": "compactor.ring.dynamodb.ttl-time", + "x-format": "duration" + } + }, + "type": "object" + }, + "etcd": { + "$ref": "#/definitions/etcd_config" + }, + "multi": { + "properties": { + "mirror_enabled": { + "default": false, + "description": "Mirror writes to secondary store.", + "type": "boolean", + "x-cli-flag": "compactor.ring.multi.mirror-enabled" + }, + "mirror_timeout": { + "default": "2s", + "description": "Timeout for storing value to secondary store.", + "type": "string", + "x-cli-flag": "compactor.ring.multi.mirror-timeout", + "x-format": "duration" + }, + "primary": { + "description": "Primary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "compactor.ring.multi.primary" + }, + "secondary": { + "description": "Secondary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "compactor.ring.multi.secondary" + } + }, + "type": "object" + }, + "prefix": { + "default": "collectors/", + "description": "The prefix for the keys in the store. 
Should end with a /.", + "type": "string", + "x-cli-flag": "compactor.ring.prefix" + }, + "store": { + "default": "consul", + "description": "Backend storage to use for the ring. Supported values are: consul, dynamodb, etcd, inmemory, memberlist, multi.", + "type": "string", + "x-cli-flag": "compactor.ring.store" + } + }, + "type": "object" + }, + "tokens_file_path": { + "description": "File path where tokens are stored. If empty, tokens are not stored at shutdown and restored at startup.", + "type": "string", + "x-cli-flag": "compactor.ring.tokens-file-path" + }, + "unregister_on_shutdown": { + "default": true, + "description": "Unregister the compactor during shutdown if true.", + "type": "boolean", + "x-cli-flag": "compactor.ring.unregister-on-shutdown" + }, + "wait_active_instance_timeout": { + "default": "10m0s", + "description": "Timeout for waiting on compactor to become ACTIVE in the ring.", + "type": "string", + "x-cli-flag": "compactor.ring.wait-active-instance-timeout", + "x-format": "duration" + }, + "wait_stability_max_duration": { + "default": "5m0s", + "description": "Maximum time to wait for ring stability at startup. If the compactor ring keeps changing after this period of time, the compactor will start anyway.", + "type": "string", + "x-cli-flag": "compactor.ring.wait-stability-max-duration", + "x-format": "duration" + }, + "wait_stability_min_duration": { + "default": "1m0s", + "description": "Minimum time to wait for ring stability at startup. 0 to disable.", + "type": "string", + "x-cli-flag": "compactor.ring.wait-stability-min-duration", + "x-format": "duration" + } + }, + "type": "object" + }, + "sharding_strategy": { + "default": "default", + "description": "The sharding strategy to use. Supported values are: default, shuffle-sharding.", + "type": "string", + "x-cli-flag": "compactor.sharding-strategy" + }, + "skip_blocks_with_out_of_order_chunks_enabled": { + "default": false, + "description": "When enabled, mark blocks containing index with out-of-order chunks for no compact instead of halting the compaction.", + "type": "boolean", + "x-cli-flag": "compactor.skip-blocks-with-out-of-order-chunks-enabled" + }, + "tenant_cleanup_delay": { + "default": "6h0m0s", + "description": "For tenants marked for deletion, this is the time between deleting the last block and doing the final cleanup (marker files, debug files) of the tenant.", + "type": "string", + "x-cli-flag": "compactor.tenant-cleanup-delay", + "x-format": "duration" + } + }, + "type": "object" + }, + "configs_config": { + "description": "The configs_config configures the Cortex Configs DB and API.", + "properties": { + "api": { + "properties": { + "notifications": { + "properties": { + "disable_email": { + "default": false, + "description": "Disable Email notifications for Alertmanager.", + "type": "boolean", + "x-cli-flag": "configs.notifications.disable-email" + }, + "disable_webhook": { + "default": false, + "description": "Disable WebHook notifications for Alertmanager.", + "type": "boolean", + "x-cli-flag": "configs.notifications.disable-webhook" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "database": { + "properties": { + "migrations_dir": { + "description": "Path where the database migration files can be found", + "type": "string", + "x-cli-flag": "configs.database.migrations-dir" + }, + "password_file": { + "description": "File containing password (username goes in URI)", + "type": "string", + "x-cli-flag": "configs.database.password-file" + }, + "uri": { + "default":
"postgres://postgres@configs-db.weave.local/configs?sslmode=disable", + "description": "URI where the database can be found (for dev you can use memory://)", + "type": "string", + "x-cli-flag": "configs.database.uri" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "configstore_config": { + "description": "The configstore_config configures the config database storing rules and alerts, and is used by the Cortex alertmanager.", + "properties": { + "client_timeout": { + "default": "5s", + "description": "Timeout for requests to Weave Cloud configs service.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.configs.client-timeout", + "x-format": "duration" + }, + "configs_api_url": { + "description": "URL of configs API server.", + "format": "uri", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.configs.url" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.configs.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.configs.tls-cert-path" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "\u003cprefix\u003e.configs.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.configs.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.configs.tls-server-name" + } + }, + "type": "object" + }, + "consul_config": { + "description": "The consul_config configures the consul client.", + "properties": { + "acl_token": { + "description": "ACL Token used to interact with Consul.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.consul.acl-token" + }, + "consistent_reads": { + "default": false, + "description": "Enable consistent reads to Consul.", + "type": "boolean", + "x-cli-flag": "\u003cprefix\u003e.consul.consistent-reads" + }, + "host": { + "default": "localhost:8500", + "description": "Hostname and port of Consul.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.consul.hostname" + }, + "http_client_timeout": { + "default": "20s", + "description": "HTTP timeout when talking to Consul.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.consul.client-timeout", + "x-format": "duration" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.consul.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. 
Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.consul.tls-cert-path" + }, + "tls_enabled": { + "default": false, + "description": "Enable TLS.", + "type": "boolean", + "x-cli-flag": "\u003cprefix\u003e.consul.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "\u003cprefix\u003e.consul.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.consul.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.consul.tls-server-name" + }, + "watch_burst_size": { + "default": 1, + "description": "Burst size used in rate limit. Values less than 1 are treated as 1.", + "type": "number", + "x-cli-flag": "\u003cprefix\u003e.consul.watch-burst-size" + }, + "watch_rate_limit": { + "default": 1, + "description": "Rate limit when watching key or prefix in Consul, in requests per second. 0 disables the rate limit.", + "type": "number", + "x-cli-flag": "\u003cprefix\u003e.consul.watch-rate-limit" + } + }, + "type": "object" + }, + "distributor_config": { + "description": "The distributor_config configures the Cortex distributor.", + "properties": { + "extend_writes": { + "default": true, + "description": "Try writing to an additional ingester in the presence of an ingester not in the ACTIVE state. It is useful to disable this along with -ingester.unregister-on-shutdown=false in order to not spread samples to extra ingesters during rolling restarts with consistent naming.", + "type": "boolean", + "x-cli-flag": "distributor.extend-writes" + }, + "extra_queue_delay": { + "default": "0s", + "description": "Time to wait before sending more than the minimum successful query requests.", + "type": "string", + "x-cli-flag": "distributor.extra-query-delay", + "x-format": "duration" + }, + "ha_tracker": { + "properties": { + "enable_ha_tracker": { + "default": false, + "description": "Enable the distributors HA tracker so that it can accept samples from Prometheus HA replicas gracefully (requires labels).", + "type": "boolean", + "x-cli-flag": "distributor.ha-tracker.enable" + }, + "ha_tracker_failover_timeout": { + "default": "30s", + "description": "If we don't receive any samples from the accepted replica for a cluster in this amount of time we will failover to the next replica we receive a sample from. This value must be greater than the update timeout", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.failover-timeout", + "x-format": "duration" + }, + "ha_tracker_update_timeout": { + "default": "15s", + "description": "Update the timestamp in the KV store for a given cluster/replica only after this amount of time has passed since the current stored timestamp.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.update-timeout", + "x-format": "duration" + }, + "ha_tracker_update_timeout_jitter_max": { + "default": "5s", + "description": "Maximum jitter applied to the update timeout, in order to spread the HA heartbeats over time.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.update-timeout-jitter-max", + "x-format": "duration" + }, + "kvstore": { + "description": "Backend storage to use for the ring. 
Please be aware that memberlist is not supported by the HA tracker since gossip propagation is too slow for HA purposes.", + "properties": { + "consul": { + "$ref": "#/definitions/consul_config" + }, + "dynamodb": { + "properties": { + "max_cas_retries": { + "default": 10, + "description": "Maximum number of retries for DDB KV CAS.", + "type": "number", + "x-cli-flag": "distributor.ha-tracker.dynamodb.max-cas-retries" + }, + "puller_sync_time": { + "default": "1m0s", + "description": "Time to refresh local ring with information on dynamodb.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.dynamodb.puller-sync-time", + "x-format": "duration" + }, + "region": { + "description": "Region to access dynamodb.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.dynamodb.region" + }, + "table_name": { + "description": "Table name to use on dynamodb.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.dynamodb.table-name" + }, + "timeout": { + "default": "2m0s", + "description": "Timeout of dynamoDbClient requests. Default is 2m.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.dynamodb.timeout", + "x-format": "duration" + }, + "ttl": { + "default": "0s", + "description": "Time to expire items on dynamodb.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.dynamodb.ttl-time", + "x-format": "duration" + } + }, + "type": "object" + }, + "etcd": { + "$ref": "#/definitions/etcd_config" + }, + "multi": { + "properties": { + "mirror_enabled": { + "default": false, + "description": "Mirror writes to secondary store.", + "type": "boolean", + "x-cli-flag": "distributor.ha-tracker.multi.mirror-enabled" + }, + "mirror_timeout": { + "default": "2s", + "description": "Timeout for storing value to secondary store.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.multi.mirror-timeout", + "x-format": "duration" + }, + "primary": { + "description": "Primary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.multi.primary" + }, + "secondary": { + "description": "Secondary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.multi.secondary" + } + }, + "type": "object" + }, + "prefix": { + "default": "ha-tracker/", + "description": "The prefix for the keys in the store. Should end with a /.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.prefix" + }, + "store": { + "default": "consul", + "description": "Backend storage to use for the ring. Supported values are: consul, dynamodb, etcd, inmemory, memberlist, multi.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.store" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "instance_limits": { + "properties": { + "max_inflight_client_requests": { + "default": 0, + "description": "Max inflight ingester client requests that this distributor can handle. This limit is per-distributor, not per-tenant. Additional requests will be rejected. 0 = unlimited.", + "type": "number", + "x-cli-flag": "distributor.instance-limits.max-inflight-client-requests" + }, + "max_inflight_push_requests": { + "default": 0, + "description": "Max inflight push requests that this distributor can handle. This limit is per-distributor, not per-tenant. Additional requests will be rejected. 
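Pulling the HA tracker options together, a minimal sketch that enables deduplication of HA Prometheus replicas, assuming the block nests under `distributor.ha_tracker` as in the Cortex config-file layout:

```yaml
distributor:
  ha_tracker:
    enable_ha_tracker: true
    ha_tracker_update_timeout: 15s
    ha_tracker_failover_timeout: 30s  # must be greater than the update timeout
    kvstore:
      store: etcd        # memberlist is not supported here, per the note above
      prefix: ha-tracker/
```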
0 = unlimited.", + "type": "number", + "x-cli-flag": "distributor.instance-limits.max-inflight-push-requests" + }, + "max_ingestion_rate": { + "default": 0, + "description": "Max ingestion rate (samples/sec) that this distributor will accept. This limit is per-distributor, not per-tenant. Additional push requests will be rejected. Current ingestion rate is computed as exponentially weighted moving average, updated every second. 0 = unlimited.", + "type": "number", + "x-cli-flag": "distributor.instance-limits.max-ingestion-rate" + } + }, + "type": "object" + }, + "max_recv_msg_size": { + "default": 104857600, + "description": "remote_write API max receive message size (bytes).", + "type": "number", + "x-cli-flag": "distributor.max-recv-msg-size" + }, + "num_push_workers": { + "default": 0, + "description": "EXPERIMENTAL: Number of goroutines to handle push calls from distributors to ingesters. When no workers are available, a new goroutine will be spawned automatically. If set to 0 (default), workers are disabled, and a new goroutine will be created for each push request.", + "type": "number", + "x-cli-flag": "distributor.num-push-workers" + }, + "otlp": { + "properties": { + "allow_delta_temporality": { + "default": false, + "description": "EXPERIMENTAL: If true, delta temporality OTLP metrics are allowed to be ingested.", + "type": "boolean", + "x-cli-flag": "distributor.otlp.allow-delta-temporality" + }, + "convert_all_attributes": { + "default": false, + "description": "If true, all resource attributes are converted to labels.", + "type": "boolean", + "x-cli-flag": "distributor.otlp.convert-all-attributes" + }, + "disable_target_info": { + "default": false, + "description": "If true, a target_info metric is not ingested. (refer to: https://github.com/prometheus/OpenMetrics/blob/main/specification/OpenMetrics.md#supporting-target-metadata-in-both-push-based-and-pull-based-systems)", + "type": "boolean", + "x-cli-flag": "distributor.otlp.disable-target-info" + }, + "enable_type_and_unit_labels": { + "default": false, + "description": "EXPERIMENTAL: If true, the '__type__' and '__unit__' labels are added for the OTLP metrics.", + "type": "boolean", + "x-cli-flag": "distributor.otlp.enable-type-and-unit-labels" + } + }, + "type": "object" + }, + "otlp_max_recv_msg_size": { + "default": 104857600, + "description": "Maximum OTLP request size in bytes that the Distributor can accept.", + "type": "number", + "x-cli-flag": "distributor.otlp-max-recv-msg-size" + }, + "pool": { + "properties": { + "client_cleanup_period": { + "default": "15s", + "description": "How frequently to clean up clients for ingesters that have gone away.", + "type": "string", + "x-cli-flag": "distributor.client-cleanup-period", + "x-format": "duration" + }, + "health_check_ingesters": { + "default": true, + "description": "Run a health check on each ingester client during periodic cleanup.", + "type": "boolean", + "x-cli-flag": "distributor.health-check-ingesters" + } + }, + "type": "object" + }, + "remote_timeout": { + "default": "2s", + "description": "Timeout for downstream ingesters.", + "type": "string", + "x-cli-flag": "distributor.remote-timeout", + "x-format": "duration" + }, + "remote_writev2_enabled": { + "default": false, + "description": "EXPERIMENTAL: If true, accept Prometheus remote write v2 protocol push requests.", + "type": "boolean", + "x-cli-flag": "distributor.remote-writev2-enabled" + }, + "ring": { + "properties": { + "detailed_metrics_enabled": { + "default": true, + "description": "Set to true to enable ring 
detailed metrics. These metrics provide detailed information, such as token count and ownership per tenant. Disabling them can significantly decrease the number of metrics emitted.", + "type": "boolean", + "x-cli-flag": "distributor.ring.detailed-metrics-enabled" + }, + "heartbeat_period": { + "default": "5s", + "description": "Period at which to heartbeat to the ring. 0 = disabled.", + "type": "string", + "x-cli-flag": "distributor.ring.heartbeat-period", + "x-format": "duration" + }, + "heartbeat_timeout": { + "default": "1m0s", + "description": "The heartbeat timeout after which distributors are considered unhealthy within the ring. 0 = never (timeout disabled).", + "type": "string", + "x-cli-flag": "distributor.ring.heartbeat-timeout", + "x-format": "duration" + }, + "instance_interface_names": { + "default": "[eth0 en0]", + "description": "Name of network interface to read address from.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "distributor.ring.instance-interface-names" + }, + "kvstore": { + "properties": { + "consul": { + "$ref": "#/definitions/consul_config" + }, + "dynamodb": { + "properties": { + "max_cas_retries": { + "default": 10, + "description": "Maximum number of retries for DDB KV CAS.", + "type": "number", + "x-cli-flag": "distributor.ring.dynamodb.max-cas-retries" + }, + "puller_sync_time": { + "default": "1m0s", + "description": "Time to refresh local ring with information on dynamodb.", + "type": "string", + "x-cli-flag": "distributor.ring.dynamodb.puller-sync-time", + "x-format": "duration" + }, + "region": { + "description": "Region to access dynamodb.", + "type": "string", + "x-cli-flag": "distributor.ring.dynamodb.region" + }, + "table_name": { + "description": "Table name to use on dynamodb.", + "type": "string", + "x-cli-flag": "distributor.ring.dynamodb.table-name" + }, + "timeout": { + "default": "2m0s", + "description": "Timeout of dynamoDbClient requests. Default is 2m.", + "type": "string", + "x-cli-flag": "distributor.ring.dynamodb.timeout", + "x-format": "duration" + }, + "ttl": { + "default": "0s", + "description": "Time to expire items on dynamodb.", + "type": "string", + "x-cli-flag": "distributor.ring.dynamodb.ttl-time", + "x-format": "duration" + } + }, + "type": "object" + }, + "etcd": { + "$ref": "#/definitions/etcd_config" + }, + "multi": { + "properties": { + "mirror_enabled": { + "default": false, + "description": "Mirror writes to secondary store.", + "type": "boolean", + "x-cli-flag": "distributor.ring.multi.mirror-enabled" + }, + "mirror_timeout": { + "default": "2s", + "description": "Timeout for storing value to secondary store.", + "type": "string", + "x-cli-flag": "distributor.ring.multi.mirror-timeout", + "x-format": "duration" + }, + "primary": { + "description": "Primary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "distributor.ring.multi.primary" + }, + "secondary": { + "description": "Secondary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "distributor.ring.multi.secondary" + } + }, + "type": "object" + }, + "prefix": { + "default": "collectors/", + "description": "The prefix for the keys in the store. Should end with a /.", + "type": "string", + "x-cli-flag": "distributor.ring.prefix" + }, + "store": { + "default": "consul", + "description": "Backend storage to use for the ring. 
Supported values are: consul, dynamodb, etcd, inmemory, memberlist, multi.", + "type": "string", + "x-cli-flag": "distributor.ring.store" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "shard_by_all_labels": { + "default": false, + "description": "Distribute samples based on all labels, as opposed to solely by user and metric name.", + "type": "boolean", + "x-cli-flag": "distributor.shard-by-all-labels" + }, + "sharding_strategy": { + "default": "default", + "description": "The sharding strategy to use. Supported values are: default, shuffle-sharding.", + "type": "string", + "x-cli-flag": "distributor.sharding-strategy" + }, + "sign_write_requests": { + "default": false, + "description": "EXPERIMENTAL: If enabled, sign the write requests between distributors and ingesters.", + "type": "boolean", + "x-cli-flag": "distributor.sign-write-requests" + }, + "use_stream_push": { + "default": false, + "description": "EXPERIMENTAL: If enabled, the distributor uses a stream connection to send requests to ingesters.", + "type": "boolean", + "x-cli-flag": "distributor.use-stream-push" + } + }, + "type": "object" + }, + "etcd_config": { + "description": "The etcd_config configures the etcd client.", + "properties": { + "dial_timeout": { + "default": "10s", + "description": "The dial timeout for the etcd connection.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.etcd.dial-timeout", + "x-format": "duration" + }, + "endpoints": { + "default": [], + "description": "The etcd endpoints to connect to.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "\u003cprefix\u003e.etcd.endpoints" + }, + "max_retries": { + "default": 10, + "description": "The maximum number of retries to do for failed ops.", + "type": "number", + "x-cli-flag": "\u003cprefix\u003e.etcd.max-retries" + }, + "password": { + "description": "Etcd password.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.etcd.password" + }, + "ping-without-stream-allowed": { + "default": true, + "description": "Send Keepalive pings with no streams.", + "type": "boolean", + "x-cli-flag": "\u003cprefix\u003e.etcd.ping-without-stream-allowed" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.etcd.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.etcd.tls-cert-path" + }, + "tls_enabled": { + "default": false, + "description": "Enable TLS.", + "type": "boolean", + "x-cli-flag": "\u003cprefix\u003e.etcd.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "\u003cprefix\u003e.etcd.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. 
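A hedged sketch of `etcd_config` with client TLS enabled, as it could appear under any ring's `kvstore` (certificate paths and endpoints are placeholders):

```yaml
kvstore:
  store: etcd
  etcd:
    endpoints: ["etcd-0:2379"]        # placeholder
    dial_timeout: 10s
    max_retries: 10
    tls_enabled: true
    tls_cert_path: /certs/client.crt  # requires tls_key_path as well
    tls_key_path: /certs/client.key
    tls_ca_path: /certs/ca.crt
```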
Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.etcd.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.etcd.tls-server-name" + }, + "username": { + "description": "Etcd username.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.etcd.username" + } + }, + "type": "object" + }, + "fifo_cache_config": { + "description": "The fifo_cache_config configures the local in-memory cache.", + "properties": { + "max_size_bytes": { + "description": "Maximum memory size of the cache in bytes. A unit suffix (KB, MB, GB) may be applied.", + "type": "string", + "x-cli-flag": "frontend.fifocache.max-size-bytes" + }, + "max_size_items": { + "default": 0, + "description": "Maximum number of entries in the cache.", + "type": "number", + "x-cli-flag": "frontend.fifocache.max-size-items" + }, + "size": { + "default": 0, + "description": "Deprecated (use max-size-items or max-size-bytes instead): The number of entries to cache. ", + "type": "number", + "x-cli-flag": "frontend.fifocache.size" + }, + "validity": { + "default": "0s", + "description": "The expiry duration for the cache.", + "type": "string", + "x-cli-flag": "frontend.fifocache.duration", + "x-format": "duration" + } + }, + "type": "object" + }, + "flusher_config": { + "description": "The flusher_config configures the WAL flusher target, used to manually run one-time flushes when scaling down ingesters.", + "properties": { + "exit_after_flush": { + "default": true, + "description": "Stop Cortex after flush has finished. If false, the Cortex process will keep running, doing nothing.", + "type": "boolean", + "x-cli-flag": "flusher.exit-after-flush" + } + }, + "type": "object" + }, + "frontend_worker_config": { + "description": "The frontend_worker_config configures the worker - running within the Cortex querier - picking up and executing queries enqueued by the query-frontend or query-scheduler.", + "properties": { + "dns_lookup_duration": { + "default": "10s", + "description": "How often to query DNS for query-frontend or query-scheduler address.", + "type": "string", + "x-cli-flag": "querier.dns-lookup-period", + "x-format": "duration" + }, + "frontend_address": { + "description": "Address of query frontend service, in host:port format. If -querier.scheduler-address is set as well, querier will use scheduler instead. Only one of -querier.frontend-address or -querier.scheduler-address can be set. 
If neither is set, queries are only received via HTTP endpoint.", + "type": "string", + "x-cli-flag": "querier.frontend-address" + }, + "grpc_client_config": { + "properties": { + "backoff_config": { + "properties": { + "max_period": { + "default": "10s", + "description": "Maximum delay when backing off.", + "type": "string", + "x-cli-flag": "querier.frontend-client.backoff-max-period", + "x-format": "duration" + }, + "max_retries": { + "default": 10, + "description": "Number of times to backoff and retry before failing.", + "type": "number", + "x-cli-flag": "querier.frontend-client.backoff-retries" + }, + "min_period": { + "default": "100ms", + "description": "Minimum delay when backing off.", + "type": "string", + "x-cli-flag": "querier.frontend-client.backoff-min-period", + "x-format": "duration" + } + }, + "type": "object" + }, + "backoff_on_ratelimits": { + "default": false, + "description": "Enable backoff and retry when we hit ratelimits.", + "type": "boolean", + "x-cli-flag": "querier.frontend-client.backoff-on-ratelimits" + }, + "connect_timeout": { + "default": "5s", + "description": "The maximum amount of time to establish a connection. A value of 0 means using default gRPC client connect timeout 20s.", + "type": "string", + "x-cli-flag": "querier.frontend-client.connect-timeout", + "x-format": "duration" + }, + "grpc_compression": { + "description": "Use compression when sending messages. Supported values are: 'gzip', 'snappy', 'snappy-block', 'zstd' and '' (disable compression)", + "type": "string", + "x-cli-flag": "querier.frontend-client.grpc-compression" + }, + "max_recv_msg_size": { + "default": 104857600, + "description": "gRPC client max receive message size (bytes).", + "type": "number", + "x-cli-flag": "querier.frontend-client.grpc-max-recv-msg-size" + }, + "max_send_msg_size": { + "default": 16777216, + "description": "gRPC client max send message size (bytes).", + "type": "number", + "x-cli-flag": "querier.frontend-client.grpc-max-send-msg-size" + }, + "rate_limit": { + "default": 0, + "description": "Rate limit for gRPC client; 0 means disabled.", + "type": "number", + "x-cli-flag": "querier.frontend-client.grpc-client-rate-limit" + }, + "rate_limit_burst": { + "default": 0, + "description": "Rate limit burst for gRPC client.", + "type": "number", + "x-cli-flag": "querier.frontend-client.grpc-client-rate-limit-burst" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "querier.frontend-client.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "querier.frontend-client.tls-cert-path" + }, + "tls_enabled": { + "default": false, + "description": "Enable TLS in the GRPC client. This flag needs to be enabled when any other TLS flag is set. If set to false, insecure connection to gRPC server will be used.", + "type": "boolean", + "x-cli-flag": "querier.frontend-client.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "querier.frontend-client.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. 
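For orientation, a minimal sketch of a querier worker pointed at a query-frontend with the backoff options spelled out, assuming the block nests under the top-level `frontend_worker` section; the address is a placeholder:

```yaml
frontend_worker:
  frontend_address: "query-frontend:9095"  # mutually exclusive with scheduler_address
  parallelism: 10
  grpc_client_config:
    backoff_on_ratelimits: true
    backoff_config:
      min_period: 100ms
      max_period: 10s
      max_retries: 10
```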
Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "querier.frontend-client.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "querier.frontend-client.tls-server-name" + } + }, + "type": "object" + }, + "id": { + "description": "Querier ID, sent to frontend service to identify requests from the same querier. Defaults to hostname.", + "type": "string", + "x-cli-flag": "querier.id" + }, + "instance_interface_names": { + "default": "[eth0 en0]", + "description": "Name of network interface to read address from.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "querier.instance-interface-names" + }, + "match_max_concurrent": { + "default": false, + "description": "Force worker concurrency to match the -querier.max-concurrent option. Overrides querier.worker-parallelism.", + "type": "boolean", + "x-cli-flag": "querier.worker-match-max-concurrent" + }, + "parallelism": { + "default": 10, + "description": "Number of simultaneous queries to process per query-frontend or query-scheduler.", + "type": "number", + "x-cli-flag": "querier.worker-parallelism" + }, + "scheduler_address": { + "description": "Hostname (and port) of scheduler that querier will periodically resolve, connect to and receive queries from. Only one of -querier.frontend-address or -querier.scheduler-address can be set. If neither is set, queries are only received via HTTP endpoint.", + "type": "string", + "x-cli-flag": "querier.scheduler-address" + } + }, + "type": "object" + }, + "ingester_client_config": { + "description": "The ingester_client_config configures how the Cortex distributors connect to the ingesters.", + "properties": { + "grpc_client_config": { + "properties": { + "backoff_config": { + "properties": { + "max_period": { + "default": "10s", + "description": "Maximum delay when backing off.", + "type": "string", + "x-cli-flag": "ingester.client.backoff-max-period", + "x-format": "duration" + }, + "max_retries": { + "default": 10, + "description": "Number of times to backoff and retry before failing.", + "type": "number", + "x-cli-flag": "ingester.client.backoff-retries" + }, + "min_period": { + "default": "100ms", + "description": "Minimum delay when backing off.", + "type": "string", + "x-cli-flag": "ingester.client.backoff-min-period", + "x-format": "duration" + } + }, + "type": "object" + }, + "backoff_on_ratelimits": { + "default": false, + "description": "Enable backoff and retry when we hit ratelimits.", + "type": "boolean", + "x-cli-flag": "ingester.client.backoff-on-ratelimits" + }, + "connect_timeout": { + "default": "5s", + "description": "The maximum amount of time to establish a connection. A value of 0 means using default gRPC client connect timeout 20s.", + "type": "string", + "x-cli-flag": "ingester.client.connect-timeout", + "x-format": "duration" + }, + "grpc_compression": { + "default": "snappy-block", + "description": "Use compression when sending messages. 
Supported values are: 'gzip', 'snappy', 'snappy-block', 'zstd' and '' (disable compression)", + "type": "string", + "x-cli-flag": "ingester.client.grpc-compression" + }, + "healthcheck_config": { + "description": "EXPERIMENTAL: If enabled, gRPC clients perform health checks for each target and fail the request if the target is marked as unhealthy.", + "properties": { + "interval": { + "default": "5s", + "description": "The approximate amount of time between health checks of an individual target.", + "type": "string", + "x-cli-flag": "ingester.client.healthcheck.interval", + "x-format": "duration" + }, + "timeout": { + "default": "1s", + "description": "The amount of time during which no response from a target means a failed health check.", + "type": "string", + "x-cli-flag": "ingester.client.healthcheck.timeout", + "x-format": "duration" + }, + "unhealthy_threshold": { + "default": 0, + "description": "The number of consecutive failed health checks required before considering a target unhealthy. 0 means disabled.", + "type": "number", + "x-cli-flag": "ingester.client.healthcheck.unhealthy-threshold" + } + }, + "type": "object" + }, + "max_recv_msg_size": { + "default": 104857600, + "description": "gRPC client max receive message size (bytes).", + "type": "number", + "x-cli-flag": "ingester.client.grpc-max-recv-msg-size" + }, + "max_send_msg_size": { + "default": 16777216, + "description": "gRPC client max send message size (bytes).", + "type": "number", + "x-cli-flag": "ingester.client.grpc-max-send-msg-size" + }, + "rate_limit": { + "default": 0, + "description": "Rate limit for gRPC client; 0 means disabled.", + "type": "number", + "x-cli-flag": "ingester.client.grpc-client-rate-limit" + }, + "rate_limit_burst": { + "default": 0, + "description": "Rate limit burst for gRPC client.", + "type": "number", + "x-cli-flag": "ingester.client.grpc-client-rate-limit-burst" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "ingester.client.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "ingester.client.tls-cert-path" + }, + "tls_enabled": { + "default": false, + "description": "Enable TLS in the GRPC client. This flag needs to be enabled when any other TLS flag is set. If set to false, insecure connection to gRPC server will be used.", + "type": "boolean", + "x-cli-flag": "ingester.client.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "ingester.client.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "ingester.client.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "ingester.client.tls-server-name" + } + }, + "type": "object" + }, + "max_inflight_push_requests": { + "default": 0, + "description": "Max inflight push requests that this ingester client can handle. This limit is per-ingester-client. Additional requests will be rejected. 
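As a sketch, the experimental client-side health checking described above with an illustrative threshold, assuming the block nests under the top-level `ingester_client` section:

```yaml
ingester_client:
  grpc_client_config:
    grpc_compression: snappy-block  # the documented default
    healthcheck_config:
      interval: 5s
      timeout: 1s
      unhealthy_threshold: 3        # 0 (the default) disables health checking
```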
0 = unlimited.", + "type": "number", + "x-cli-flag": "ingester.client.max-inflight-push-requests" + } + }, + "type": "object" + }, + "ingester_config": { + "description": "The ingester_config configures the Cortex ingester.", + "properties": { + "active_series_metrics_enabled": { + "default": true, + "description": "Enable tracking of active series and export them as metrics.", + "type": "boolean", + "x-cli-flag": "ingester.active-series-metrics-enabled" + }, + "active_series_metrics_idle_timeout": { + "default": "10m0s", + "description": "After what time a series is considered to be inactive.", + "type": "string", + "x-cli-flag": "ingester.active-series-metrics-idle-timeout", + "x-format": "duration" + }, + "active_series_metrics_update_period": { + "default": "1m0s", + "description": "How often to update active series metrics.", + "type": "string", + "x-cli-flag": "ingester.active-series-metrics-update-period", + "x-format": "duration" + }, + "admin_limit_message": { + "default": "please contact administrator to raise it", + "description": "Customize the message contained in limit errors", + "type": "string", + "x-cli-flag": "ingester.admin-limit-message" + }, + "disable_chunk_trimming": { + "default": false, + "description": "Disable trimming of matching series chunks based on query Start and End time. When disabled, the result may contain samples outside the queried time range but select performance may be improved. Note that certain query results might change by changing this option.", + "type": "boolean", + "x-cli-flag": "ingester.disable-chunk-trimming" + }, + "ignore_series_limit_for_metric_names": { + "description": "Comma-separated list of metric names, for which -ingester.max-series-per-metric and -ingester.max-global-series-per-metric limits will be ignored. Does not affect max-series-per-user or max-global-series-per-metric limits.", + "type": "string", + "x-cli-flag": "ingester.ignore-series-limit-for-metric-names" + }, + "instance_limits": { + "properties": { + "max_inflight_push_requests": { + "default": 0, + "description": "Max inflight push requests that this ingester can handle (across all tenants). Additional requests will be rejected. 0 = unlimited.", + "type": "number", + "x-cli-flag": "ingester.instance-limits.max-inflight-push-requests" + }, + "max_inflight_query_requests": { + "default": 0, + "description": "Max inflight query requests that this ingester can handle (across all tenants). Additional requests will be rejected. 0 = unlimited.", + "type": "number", + "x-cli-flag": "ingester.instance-limits.max-inflight-query-requests" + }, + "max_ingestion_rate": { + "default": 0, + "description": "Max ingestion rate (samples/sec) that ingester will accept. This limit is per-ingester, not per-tenant. Additional push requests will be rejected. Current ingestion rate is computed as exponentially weighted moving average, updated every second. This limit only works when using blocks engine. 0 = unlimited.", + "type": "number", + "x-cli-flag": "ingester.instance-limits.max-ingestion-rate" + }, + "max_series": { + "default": 0, + "description": "Max series that this ingester can hold (across all tenants). Requests to create additional series will be rejected. This limit only works when using blocks engine. 0 = unlimited.", + "type": "number", + "x-cli-flag": "ingester.instance-limits.max-series" + }, + "max_tenants": { + "default": 0, + "description": "Max users that this ingester can hold. Requests from additional users will be rejected. 
This limit only works when using blocks engine. 0 = unlimited.", + "type": "number", + "x-cli-flag": "ingester.instance-limits.max-tenants" + } + }, + "type": "object" + }, + "labels_string_interning_enabled": { + "default": false, + "description": "Experimental: Enable string interning for metrics labels.", + "type": "boolean", + "x-cli-flag": "ingester.labels-string-interning-enabled" + }, + "lifecycler": { + "properties": { + "availability_zone": { + "description": "The availability zone where this instance is running.", + "type": "string", + "x-cli-flag": "ingester.availability-zone" + }, + "final_sleep": { + "default": "30s", + "description": "Duration to sleep for before exiting, to ensure metrics are scraped.", + "type": "string", + "x-cli-flag": "ingester.final-sleep", + "x-format": "duration" + }, + "heartbeat_period": { + "default": "5s", + "description": "Period at which to heartbeat to consul. 0 = disabled.", + "type": "string", + "x-cli-flag": "ingester.heartbeat-period", + "x-format": "duration" + }, + "interface_names": { + "default": "[eth0 en0]", + "description": "Name of network interface to read address from.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "ingester.lifecycler.interface" + }, + "join_after": { + "default": "0s", + "description": "Period to wait for a claim from another member; will join automatically after this.", + "type": "string", + "x-cli-flag": "ingester.join-after", + "x-format": "duration" + }, + "min_ready_duration": { + "default": "15s", + "description": "Minimum duration to wait after the internal readiness checks have passed but before succeeding the readiness endpoint. This is used to slow down deployment controllers (e.g. Kubernetes) after an instance is ready and before they proceed with a rolling update, to give the rest of the cluster instances enough time to receive ring updates.", + "type": "string", + "x-cli-flag": "ingester.min-ready-duration", + "x-format": "duration" + }, + "num_tokens": { + "default": 128, + "description": "Number of tokens for each ingester.", + "type": "number", + "x-cli-flag": "ingester.num-tokens" + }, + "observe_period": { + "default": "0s", + "description": "Observe tokens after generating to resolve collisions. Useful when using gossiping ring.", + "type": "string", + "x-cli-flag": "ingester.observe-period", + "x-format": "duration" + }, + "readiness_check_ring_health": { + "default": true, + "description": "When enabled the readiness probe succeeds only after all instances are ACTIVE and healthy in the ring, otherwise only the instance itself is checked. This option should be disabled if in your cluster multiple instances can be rolled out simultaneously, otherwise rolling updates may be slowed down.", + "type": "boolean", + "x-cli-flag": "ingester.readiness-check-ring-health" + }, + "ring": { + "properties": { + "detailed_metrics_enabled": { + "default": true, + "description": "Set to true to enable ring detailed metrics. These metrics provide detailed information, such as token count and ownership per tenant. Disabling them can significantly decrease the number of metrics emitted by the distributors.", + "type": "boolean", + "x-cli-flag": "ring.detailed-metrics-enabled" + }, + "excluded_zones": { + "description": "Comma-separated list of zones to exclude from the ring. 
Instances in excluded zones will be filtered out from the ring.", + "type": "string", + "x-cli-flag": "distributor.excluded-zones" + }, + "heartbeat_timeout": { + "default": "1m0s", + "description": "The heartbeat timeout after which ingesters are skipped for reads/writes. 0 = never (timeout disabled).", + "type": "string", + "x-cli-flag": "ring.heartbeat-timeout", + "x-format": "duration" + }, + "kvstore": { + "properties": { + "consul": { + "$ref": "#/definitions/consul_config" + }, + "dynamodb": { + "properties": { + "max_cas_retries": { + "default": 10, + "description": "Maximum number of retries for DDB KV CAS.", + "type": "number", + "x-cli-flag": "dynamodb.max-cas-retries" + }, + "puller_sync_time": { + "default": "1m0s", + "description": "Time to refresh local ring with information on dynamodb.", + "type": "string", + "x-cli-flag": "dynamodb.puller-sync-time", + "x-format": "duration" + }, + "region": { + "description": "Region to access dynamodb.", + "type": "string", + "x-cli-flag": "dynamodb.region" + }, + "table_name": { + "description": "Table name to use on dynamodb.", + "type": "string", + "x-cli-flag": "dynamodb.table-name" + }, + "timeout": { + "default": "2m0s", + "description": "Timeout of dynamoDbClient requests. Default is 2m.", + "type": "string", + "x-cli-flag": "dynamodb.timeout", + "x-format": "duration" + }, + "ttl": { + "default": "0s", + "description": "Time to expire items on dynamodb.", + "type": "string", + "x-cli-flag": "dynamodb.ttl-time", + "x-format": "duration" + } + }, + "type": "object" + }, + "etcd": { + "$ref": "#/definitions/etcd_config" + }, + "multi": { + "properties": { + "mirror_enabled": { + "default": false, + "description": "Mirror writes to secondary store.", + "type": "boolean", + "x-cli-flag": "multi.mirror-enabled" + }, + "mirror_timeout": { + "default": "2s", + "description": "Timeout for storing value to secondary store.", + "type": "string", + "x-cli-flag": "multi.mirror-timeout", + "x-format": "duration" + }, + "primary": { + "description": "Primary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "multi.primary" + }, + "secondary": { + "description": "Secondary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "multi.secondary" + } + }, + "type": "object" + }, + "prefix": { + "default": "collectors/", + "description": "The prefix for the keys in the store. Should end with a /.", + "type": "string", + "x-cli-flag": "ring.prefix" + }, + "store": { + "default": "consul", + "description": "Backend storage to use for the ring. Supported values are: consul, dynamodb, etcd, inmemory, memberlist, multi.", + "type": "string", + "x-cli-flag": "ring.store" + } + }, + "type": "object" + }, + "replication_factor": { + "default": 3, + "description": "The number of ingesters to write to and read from.", + "type": "number", + "x-cli-flag": "distributor.replication-factor" + }, + "zone_awareness_enabled": { + "default": false, + "description": "True to enable the zone-awareness and replicate ingested samples across different availability zones.", + "type": "boolean", + "x-cli-flag": "distributor.zone-awareness-enabled" + } + }, + "type": "object" + }, + "tokens_file_path": { + "description": "File path where tokens are stored. If empty, tokens are not stored at shutdown and restored at startup.", + "type": "string", + "x-cli-flag": "ingester.tokens-file-path" + }, + "tokens_generator_strategy": { + "default": "random", + "description": "EXPERIMENTAL: Algorithm used to generate new ring tokens. 
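A minimal sketch combining the lifecycler and ring options above into zone-aware replication, assuming the `ingester.lifecycler` nesting of the Cortex config-file layout; the zone name is a placeholder:

```yaml
ingester:
  lifecycler:
    num_tokens: 128
    heartbeat_period: 5s
    availability_zone: zone-a       # placeholder
    ring:
      replication_factor: 3
      zone_awareness_enabled: true  # replicate across availability zones
      kvstore:
        store: consul
```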
Supported values: random, minimize-spread", + "type": "string", + "x-cli-flag": "ingester.tokens-generator-strategy" + }, + "unregister_on_shutdown": { + "default": true, + "description": "Unregister from the ring upon clean shutdown. It can be useful to disable for rolling restarts with consistent naming in conjunction with -distributor.extend-writes=false.", + "type": "boolean", + "x-cli-flag": "ingester.unregister-on-shutdown" + } + }, + "type": "object" + }, + "matchers_cache_max_items": { + "default": 0, + "description": "Maximum number of entries in the regex matchers cache. 0 to disable.", + "type": "number", + "x-cli-flag": "ingester.matchers-cache-max-items" + }, + "metadata_retain_period": { + "default": "10m0s", + "description": "Period for which metadata we have not seen will remain in memory before being deleted.", + "type": "string", + "x-cli-flag": "ingester.metadata-retain-period", + "x-format": "duration" + }, + "query_protection": { + "properties": { + "rejection": { + "properties": { + "threshold": { + "properties": { + "cpu_utilization": { + "default": 0, + "description": "EXPERIMENTAL: Max CPU utilization that this ingester can reach before rejecting new query requests (across all tenants) in percentage, between 0 and 1. monitored_resources config must include the resource type. 0 to disable.", + "type": "number", + "x-cli-flag": "ingester.query-protection.rejection.threshold.cpu-utilization" + }, + "heap_utilization": { + "default": 0, + "description": "EXPERIMENTAL: Max heap utilization that this ingester can reach before rejecting new query requests (across all tenants) in percentage, between 0 and 1. monitored_resources config must include the resource type. 0 to disable.", + "type": "number", + "x-cli-flag": "ingester.query-protection.rejection.threshold.heap-utilization" + } + }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "rate_update_period": { + "default": "15s", + "description": "Period with which to update the per-user ingestion rates.", + "type": "string", + "x-cli-flag": "ingester.rate-update-period", + "x-format": "duration" + }, + "skip_metadata_limits": { + "default": true, + "description": "If enabled, the metadata API returns all metadata regardless of the limits.", + "type": "boolean", + "x-cli-flag": "ingester.skip-metadata-limits" + }, + "upload_compacted_blocks_enabled": { + "default": true, + "description": "Enable uploading compacted blocks.", + "type": "boolean", + "x-cli-flag": "ingester.upload-compacted-blocks-enabled" + }, + "user_tsdb_configs_update_period": { + "default": "15s", + "description": "Period with which to update the per-user tsdb config.", + "type": "string", + "x-cli-flag": "ingester.user-tsdb-configs-update-period", + "x-format": "duration" + } + }, + "type": "object" + }, + "limits_config": { + "description": "The limits_config configures default and per-tenant limits imposed by Cortex services (i.e. distributor, ingester, ...).", + "properties": { + "accept_ha_samples": { + "default": false, + "description": "Flag to enable, for all users, handling of samples with external labels identifying replicas in an HA Prometheus setup.", + "type": "boolean", + "x-cli-flag": "distributor.ha-tracker.enable-for-all-users" + }, + "accept_mixed_ha_samples": { + "default": false, + "description": "[Experimental] Flag to enable handling of samples with mixed external labels identifying replicas in an HA Prometheus setup. 
Supported only if -distributor.ha-tracker.enable-for-all-users is true.", + "type": "boolean", + "x-cli-flag": "experimental.distributor.ha-tracker.mixed-ha-samples" + }, + "alertmanager_max_alerts_count": { + "default": 0, + "description": "Maximum number of alerts that a single user can have. Inserting more alerts will fail with a log message and metric increment. 0 = no limit.", + "type": "number", + "x-cli-flag": "alertmanager.max-alerts-count" + }, + "alertmanager_max_alerts_size_bytes": { + "default": 0, + "description": "Maximum total size of alerts that a single user can have; alert size is the sum of the bytes of its labels, annotations and generatorURL. Inserting more alerts will fail with a log message and metric increment. 0 = no limit.", + "type": "number", + "x-cli-flag": "alertmanager.max-alerts-size-bytes" + }, + "alertmanager_max_config_size_bytes": { + "default": 0, + "description": "Maximum size of configuration file for Alertmanager that tenant can upload via Alertmanager API. 0 = no limit.", + "type": "number", + "x-cli-flag": "alertmanager.max-config-size-bytes" + }, + "alertmanager_max_dispatcher_aggregation_groups": { + "default": 0, + "description": "Maximum number of aggregation groups in Alertmanager's dispatcher that a tenant can have. Each active aggregation group uses a single goroutine. When the limit is reached, the dispatcher will not dispatch alerts that belong to additional aggregation groups, but existing groups will keep working properly. 0 = no limit.", + "type": "number", + "x-cli-flag": "alertmanager.max-dispatcher-aggregation-groups" + }, + "alertmanager_max_silences_count": { + "default": 0, + "description": "Maximum number of silences that a single user can have, including expired silences. 0 = no limit.", + "type": "number", + "x-cli-flag": "alertmanager.max-silences-count" + }, + "alertmanager_max_silences_size_bytes": { + "default": 0, + "description": "Maximum size of individual silences that a single user can have. 0 = no limit.", + "type": "number", + "x-cli-flag": "alertmanager.max-silences-size-bytes" + }, + "alertmanager_max_template_size_bytes": { + "default": 0, + "description": "Maximum size of single template in tenant's Alertmanager configuration uploaded via Alertmanager API. 0 = no limit.", + "type": "number", + "x-cli-flag": "alertmanager.max-template-size-bytes" + }, + "alertmanager_max_templates_count": { + "default": 0, + "description": "Maximum number of templates in tenant's Alertmanager configuration uploaded via Alertmanager API. 0 = no limit.", + "type": "number", + "x-cli-flag": "alertmanager.max-templates-count" + }, + "alertmanager_notification_rate_limit": { + "default": 0, + "description": "Per-user rate limit for sending notifications from Alertmanager in notifications/sec. 0 = rate limit disabled. Negative value = no notifications are allowed.", + "type": "number", + "x-cli-flag": "alertmanager.notification-rate-limit" + }, + "alertmanager_notification_rate_limit_per_integration": { + "additionalProperties": true, + "default": "{}", + "description": "Per-integration notification rate limits. Value is a map, where each key is an integration name and value is a rate-limit (float). On command line, this map is given in JSON format. Rate limit has the same meaning as -alertmanager.notification-rate-limit, but only applies to the specific integration. 
Allowed integration names: webhook, email, pagerduty, opsgenie, wechat, slack, victorops, pushover, sns, telegram, discord, webex, msteams, msteamsv2, jira, rocketchat.", + "type": "object", + "x-cli-flag": "alertmanager.notification-rate-limit-per-integration" + }, + "alertmanager_receivers_firewall_block_cidr_networks": { + "description": "Comma-separated list of network CIDRs to block in Alertmanager receiver integrations.", + "type": "string", + "x-cli-flag": "alertmanager.receivers-firewall-block-cidr-networks" + }, + "alertmanager_receivers_firewall_block_private_addresses": { + "default": false, + "description": "True to block private and local addresses in Alertmanager receiver integrations. It blocks private addresses defined by RFC 1918 (IPv4 addresses) and RFC 4193 (IPv6 addresses), as well as loopback, local unicast and local multicast addresses.", + "type": "boolean", + "x-cli-flag": "alertmanager.receivers-firewall-block-private-addresses" + }, + "compactor_blocks_retention_period": { + "default": "0s", + "description": "Delete blocks containing samples older than the specified retention period. 0 to disable.", + "type": "string", + "x-cli-flag": "compactor.blocks-retention-period", + "x-format": "duration" + }, + "compactor_partition_index_size_bytes": { + "default": 68719476736, + "description": "Index size limit in bytes for each compaction partition. 0 means no limit", + "type": "number", + "x-cli-flag": "compactor.partition-index-size-bytes" + }, + "compactor_partition_series_count": { + "default": 0, + "description": "Time series count limit for each compaction partition. 0 means no limit", + "type": "number", + "x-cli-flag": "compactor.partition-series-count" + }, + "compactor_tenant_shard_size": { + "default": 0, + "description": "The default tenant's shard size when the shuffle-sharding strategy is used by the compactor. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant. 
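To make the per-integration rate-limit map concrete, a hedged sketch of the corresponding `limits` section (tenant values are illustrative, not recommendations):

```yaml
limits:
  alertmanager_notification_rate_limit: 10  # notifications/sec; 0 = disabled
  alertmanager_notification_rate_limit_per_integration:
    webhook: 5
    slack: 2
  alertmanager_max_alerts_count: 1000
```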
If the value is \u003c 1 and \u003e 0, the shard size will be a percentage of the total compactors.", + "type": "number", + "x-cli-flag": "compactor.tenant-shard-size" + }, + "creation_grace_period": { + "default": "10m", + "description": "Duration for which a table will be created/deleted before/after it's needed; we won't accept samples from before this time.", + "type": "string", + "x-cli-flag": "validation.create-grace-period", + "x-format": "duration" + }, + "disabled_rule_groups": { + "default": [], + "description": "List of rule groups to disable.", + "items": { + "type": "string" + }, + "type": "array" + }, + "drop_labels": { + "default": [], + "description": "This flag can be used to specify label names to drop during sample ingestion within the distributor and can be repeated in order to drop multiple labels.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "distributor.drop-label" + }, + "enable_native_histograms": { + "default": false, + "description": "[EXPERIMENTAL] True to enable native histogram.", + "type": "boolean", + "x-cli-flag": "blocks-storage.tsdb.enable-native-histograms" + }, + "enforce_metadata_metric_name": { + "default": true, + "description": "Enforce every metadata has a metric name.", + "type": "boolean", + "x-cli-flag": "validation.enforce-metadata-metric-name" + }, + "enforce_metric_name": { + "default": true, + "description": "Enforce every sample has a metric name.", + "type": "boolean", + "x-cli-flag": "validation.enforce-metric-name" + }, + "ha_cluster_label": { + "default": "cluster", + "description": "Prometheus label to look for in samples to identify a Prometheus HA cluster.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.cluster" + }, + "ha_max_clusters": { + "default": 0, + "description": "Maximum number of clusters that HA tracker will keep track of for a single user. 0 to disable the limit.", + "type": "number", + "x-cli-flag": "distributor.ha-tracker.max-clusters" + }, + "ha_replica_label": { + "default": "__replica__", + "description": "Prometheus label to look for in samples to identify a Prometheus HA replica.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.replica" + }, + "ingestion_burst_size": { + "default": 50000, + "description": "Per-user allowed ingestion burst size (in number of samples).", + "type": "number", + "x-cli-flag": "distributor.ingestion-burst-size" + }, + "ingestion_rate": { + "default": 25000, + "description": "Per-user ingestion rate limit in samples per second.", + "type": "number", + "x-cli-flag": "distributor.ingestion-rate-limit" + }, + "ingestion_rate_strategy": { + "default": "local", + "description": "Whether the ingestion rate limit should be applied individually to each distributor instance (local), or evenly shared across the cluster (global).", + "type": "string", + "x-cli-flag": "distributor.ingestion-rate-limit-strategy" + }, + "ingestion_tenant_shard_size": { + "default": 0, + "description": "The default tenant's shard size when the shuffle-sharding strategy is used. Must be set both on ingesters and distributors. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant.", + "type": "number", + "x-cli-flag": "distributor.ingestion-tenant-shard-size" + }, + "limits_per_label_set": { + "default": [], + "description": "[Experimental] Enable limits per LabelSet. 
Supported limits per labelSet: [max_series]", + "items": { + "type": "string" + }, + "type": "array" + }, + "max_cache_freshness": { + "default": "1m", + "description": "Most recent allowed cacheable result per-tenant, to prevent caching very recent results that might still be in flux.", + "type": "string", + "x-cli-flag": "frontend.max-cache-freshness", + "x-format": "duration" + }, + "max_downloaded_bytes_per_request": { + "default": 0, + "description": "The maximum number of data bytes to download per gRPC request in Store Gateway, including Series/LabelNames/LabelValues requests. 0 to disable.", + "type": "number", + "x-cli-flag": "store-gateway.max-downloaded-bytes-per-request" + }, + "max_exemplars": { + "default": 0, + "description": "Enables support for exemplars in TSDB and sets the maximum number that will be stored. Less than zero means disabled. If the value is set to zero, Cortex will fall back to the blocks-storage.tsdb.max-exemplars value.", + "type": "number", + "x-cli-flag": "ingester.max-exemplars" + }, + "max_fetched_chunk_bytes_per_query": { + "default": 0, + "description": "Deprecated (use max-fetched-data-bytes-per-query instead): The maximum size of all chunks in bytes that a query can fetch from each ingester and storage. This limit is enforced in the querier, ruler and store-gateway. 0 to disable.", + "type": "number", + "x-cli-flag": "querier.max-fetched-chunk-bytes-per-query" + }, + "max_fetched_chunks_per_query": { + "default": 2000000, + "description": "Maximum number of chunks that can be fetched in a single query from ingesters and long-term storage. This limit is enforced in the querier, ruler and store-gateway. 0 to disable.", + "type": "number", + "x-cli-flag": "querier.max-fetched-chunks-per-query" + }, + "max_fetched_data_bytes_per_query": { + "default": 0, + "description": "The maximum combined size of all data that a query can fetch from each ingester and storage. This limit is enforced in the querier and ruler for `query`, `query_range` and `series` APIs. 0 to disable.", + "type": "number", + "x-cli-flag": "querier.max-fetched-data-bytes-per-query" + }, + "max_fetched_series_per_query": { + "default": 0, + "description": "The maximum number of unique series for which a query can fetch samples from each ingester and blocks storage. This limit is enforced in the querier, ruler and store-gateway. 0 to disable.", + "type": "number", + "x-cli-flag": "querier.max-fetched-series-per-query" + }, + "max_global_metadata_per_metric": { + "default": 0, + "description": "The maximum number of metadata per metric, across the cluster. 0 to disable.", + "type": "number", + "x-cli-flag": "ingester.max-global-metadata-per-metric" + }, + "max_global_metadata_per_user": { + "default": 0, + "description": "The maximum number of active metrics with metadata per user, across the cluster. 0 to disable. Supported only if -distributor.shard-by-all-labels is true.", + "type": "number", + "x-cli-flag": "ingester.max-global-metadata-per-user" + }, + "max_global_native_histogram_series_per_user": { + "default": 0, + "description": "The maximum number of active native histogram series per user, across the cluster before replication. 0 to disable. 
Supported only if -distributor.shard-by-all-labels and ingester.active-series-metrics-enabled is true.", + "type": "number", + "x-cli-flag": "ingester.max-global-native-histogram-series-per-user" + }, + "max_global_series_per_metric": { + "default": 0, + "description": "The maximum number of active series per metric name, across the cluster before replication. 0 to disable.", + "type": "number", + "x-cli-flag": "ingester.max-global-series-per-metric" + }, + "max_global_series_per_user": { + "default": 0, + "description": "The maximum number of active series per user, across the cluster before replication. 0 to disable. Supported only if -distributor.shard-by-all-labels is true.", + "type": "number", + "x-cli-flag": "ingester.max-global-series-per-user" + }, + "max_label_name_length": { + "default": 1024, + "description": "Maximum length accepted for label names", + "type": "number", + "x-cli-flag": "validation.max-length-label-name" + }, + "max_label_names_per_series": { + "default": 30, + "description": "Maximum number of label names per series.", + "type": "number", + "x-cli-flag": "validation.max-label-names-per-series" + }, + "max_label_value_length": { + "default": 2048, + "description": "Maximum length accepted for label value. This setting also applies to the metric name", + "type": "number", + "x-cli-flag": "validation.max-length-label-value" + }, + "max_labels_size_bytes": { + "default": 0, + "description": "Maximum combined size in bytes of all labels and label values accepted for a series. 0 to disable the limit.", + "type": "number", + "x-cli-flag": "validation.max-labels-size-bytes" + }, + "max_metadata_length": { + "default": 1024, + "description": "Maximum length accepted for metric metadata. Metadata refers to Metric Name, HELP and UNIT.", + "type": "number", + "x-cli-flag": "validation.max-metadata-length" + }, + "max_metadata_per_metric": { + "default": 10, + "description": "The maximum number of metadata per metric, per ingester. 0 to disable.", + "type": "number", + "x-cli-flag": "ingester.max-metadata-per-metric" + }, + "max_metadata_per_user": { + "default": 8000, + "description": "The maximum number of active metrics with metadata per user, per ingester. 0 to disable.", + "type": "number", + "x-cli-flag": "ingester.max-metadata-per-user" + }, + "max_native_histogram_buckets": { + "default": 0, + "description": "Limit on total number of positive and negative buckets allowed in a single native histogram. The resolution of a histogram with more buckets will be reduced until the number of buckets is within the limit. If the limit cannot be reached, the sample will be discarded. 0 means no limit. Enforced at Distributor.", + "type": "number", + "x-cli-flag": "validation.max-native-histogram-buckets" + }, + "max_native_histogram_sample_size_bytes": { + "default": 0, + "description": "Maximum size in bytes of a native histogram sample. 0 to disable the limit.", + "type": "number", + "x-cli-flag": "validation.max-native-histogram-sample-size-bytes" + }, + "max_native_histogram_series_per_user": { + "default": 0, + "description": "The maximum number of active native histogram series per user, per ingester. 0 to disable. 
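The per-ingester and cluster-wide series limits above are most often tuned per tenant. Assuming the usual runtime-overrides layout (an `overrides:` map keyed by tenant ID, which is not part of this schema excerpt), a hedged sketch might look like:

```yaml
# Hypothetical per-tenant overrides; tenant ID and values are illustrative.
overrides:
  tenant-a:
    max_global_series_per_user: 1000000  # needs -distributor.shard-by-all-labels=true
    max_label_names_per_series: 40       # schema default is 30
    max_label_value_length: 4096         # also applies to the metric name
```
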
Supported only if ingester.active-series-metrics-enabled is true.", + "type": "number", + "x-cli-flag": "ingester.max-native-histogram-series-per-user" + }, + "max_outstanding_requests_per_tenant": { + "default": 100, + "description": "Maximum number of outstanding requests per tenant per request queue (either query frontend or query scheduler); requests beyond this error with HTTP 429.", + "type": "number", + "x-cli-flag": "frontend.max-outstanding-requests-per-tenant" + }, + "max_queriers_per_tenant": { + "default": 0, + "description": "Maximum number of queriers that can handle requests for a single tenant. If set to 0 or a value higher than the number of available queriers, *all* queriers will handle requests for the tenant. If the value is \u003c 1, it will be treated as a percentage and the tenant gets that percentage of the total queriers. Each frontend (or query-scheduler, if used) will select the same set of queriers for the same tenant (given that all queriers are connected to all frontends / query-schedulers). This option only works with queriers connecting to the query-frontend / query-scheduler, not when using downstream URL.", + "type": "number", + "x-cli-flag": "frontend.max-queriers-per-tenant" + }, + "max_query_length": { + "default": "0s", + "description": "Limit the query time range (end - start time of range query parameter and max - min of data fetched time range). This limit is enforced in the query-frontend and ruler (on the received query). 0 to disable.", + "type": "string", + "x-cli-flag": "store.max-query-length", + "x-format": "duration" + }, + "max_query_lookback": { + "default": "0s", + "description": "Limit how long back data (series and metadata) can be queried, up until \u003clookback\u003e duration ago. This limit is enforced in the query-frontend, querier and ruler. If the requested time range is outside the allowed range, the request will not fail but will be manipulated to only query data within the allowed time range. 0 to disable.", + "type": "string", + "x-cli-flag": "querier.max-query-lookback", + "x-format": "duration" + }, + "max_query_parallelism": { + "default": 14, + "description": "Maximum number of split queries that will be scheduled in parallel by the frontend.", + "type": "number", + "x-cli-flag": "querier.max-query-parallelism" + }, + "max_query_response_size": { + "default": 0, + "description": "The maximum total uncompressed query response size. If the query was sharded the limit is applied to the total response size of all shards. This limit is enforced in query-frontend for `query` and `query_range` APIs. 0 to disable.", + "type": "number", + "x-cli-flag": "frontend.max-query-response-size" + }, + "max_series_per_metric": { + "default": 50000, + "description": "The maximum number of active series per metric name, per ingester. 0 to disable.", + "type": "number", + "x-cli-flag": "ingester.max-series-per-metric" + }, + "max_series_per_user": { + "default": 5000000, + "description": "The maximum number of active series per user, per ingester. 0 to disable.", + "type": "number", + "x-cli-flag": "ingester.max-series-per-user" + }, + "metric_relabel_configs": { + "default": [], + "description": "List of metric relabel configurations. Note that in most situations, it is more effective to use metrics relabeling directly in the Prometheus server, e.g. 
remote_write.write_relabel_configs.", + "type": "string" + }, + "native_histogram_ingestion_burst_size": { + "default": 0, + "description": "Per-user allowed native histogram ingestion burst size (in number of samples)", + "type": "number", + "x-cli-flag": "distributor.native-histogram-ingestion-burst-size" + }, + "native_histogram_ingestion_rate": { + "default": 1.7976931348623157e+308, + "description": "Per-user native histogram ingestion rate limit in samples per second. Disabled by default", + "type": "number", + "x-cli-flag": "distributor.native-histogram-ingestion-rate-limit" + }, + "out_of_order_time_window": { + "default": "0s", + "description": "[Experimental] Configures the allowed time window for ingestion of out-of-order samples. Disabled (0s) by default.", + "type": "string", + "x-cli-flag": "ingester.out-of-order-time-window", + "x-format": "duration" + }, + "parquet_converter_enabled": { + "default": false, + "description": "If set, enables the Parquet converter to create the parquet files.", + "type": "boolean", + "x-cli-flag": "parquet-converter.enabled" + }, + "parquet_converter_sort_columns": { + "default": [], + "description": "Additional label names for specific tenants to sort by after metric name, in order of precedence. These are applied during Parquet file generation.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "parquet-converter.sort-columns" + }, + "parquet_converter_tenant_shard_size": { + "default": 0, + "description": "The default tenant's shard size when the shuffle-sharding strategy is used by the parquet converter. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant. If the value is \u003c 1 and \u003e 0 the shard size will be a percentage of the total parquet converters.", + "type": "number", + "x-cli-flag": "parquet-converter.tenant-shard-size" + }, + "parquet_max_fetched_chunk_bytes": { + "default": 0, + "description": "The maximum number of bytes that can be used to fetch chunk column pages when querying parquet storage. 0 to disable.", + "type": "number", + "x-cli-flag": "querier.parquet-queryable.max-fetched-chunk-bytes" + }, + "parquet_max_fetched_data_bytes": { + "default": 0, + "description": "The maximum number of bytes that can be used to fetch all column pages when querying parquet storage. 0 to disable.", + "type": "number", + "x-cli-flag": "querier.parquet-queryable.max-fetched-data-bytes" + }, + "parquet_max_fetched_row_count": { + "default": 0, + "description": "The maximum number of rows that can be fetched when querying parquet storage. Each row maps to a series in a parquet file. This limit applies before materializing chunks. 0 to disable.", + "type": "number", + "x-cli-flag": "querier.parquet-queryable.max-fetched-row-count" + }, + "promote_resource_attributes": { + "description": "Comma separated list of resource attributes that should be converted to labels.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "distributor.promote-resource-attributes" + }, + "query_partial_data": { + "default": false, + "description": "Enable to allow queries to be evaluated with data from a single zone, if other zones are not available.", + "type": "boolean" + }, + "query_priority": { + "description": "Configuration for query priority.", + "properties": { + "default_priority": { + "default": 0, + "description": "Priority assigned to all queries by default. Must be a unique value. 
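To make the query-range limits a few entries above concrete, here is a hedged sketch of tenant-wide query bounds (the values are hypothetical; 721h is roughly 30 days):

```yaml
limits:
  max_query_length: 721h            # enforced in query-frontend and ruler
  max_query_lookback: 90d           # older queries are clamped, not failed
  max_query_parallelism: 14         # schema default
  max_fetched_chunks_per_query: 2000000
```
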
Use this as a baseline to make certain queries higher/lower priority.", + "type": "number", + "x-cli-flag": "frontend.query-priority.default-priority" + }, + "enabled": { + "default": false, + "description": "Whether queries are assigned with priorities.", + "type": "boolean", + "x-cli-flag": "frontend.query-priority.enabled" + }, + "priorities": { + "default": [], + "description": "List of priority definitions.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "query_rejection": { + "description": "Configuration for query rejection.", + "properties": { + "enabled": { + "default": false, + "description": "Whether query rejection is enabled.", + "type": "boolean", + "x-cli-flag": "frontend.query-rejection.enabled" + }, + "query_attributes": { + "default": [], + "description": "List of query_attributes to match and reject queries. A query is rejected if it matches any query_attribute in this list. Each query_attribute has several properties (e.g., regex, time_window, user_agent), and all specified properties must match for a query_attribute to be considered a match. Only the specified properties are checked, and an AND operator is applied to them.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "query_vertical_shard_size": { + "default": 0, + "description": "[Experimental] Number of shards to use when distributing shardable PromQL queries.", + "type": "number", + "x-cli-flag": "frontend.query-vertical-shard-size" + }, + "reject_old_samples": { + "default": false, + "description": "Reject old samples.", + "type": "boolean", + "x-cli-flag": "validation.reject-old-samples" + }, + "reject_old_samples_max_age": { + "default": "2w", + "description": "Maximum accepted sample age before rejecting.", + "type": "string", + "x-cli-flag": "validation.reject-old-samples.max-age", + "x-format": "duration" + }, + "ruler_evaluation_delay_duration": { + "default": "0s", + "description": "Deprecated(use ruler.query-offset instead) and will be removed in v1.19.0: Duration to delay the evaluation of rules to ensure the underlying metrics have been pushed to Cortex.", + "type": "string", + "x-cli-flag": "ruler.evaluation-delay-duration", + "x-format": "duration" + }, + "ruler_external_labels": { + "additionalProperties": true, + "default": [], + "description": "external labels for alerting rules", + "type": "object" + }, + "ruler_max_rule_groups_per_tenant": { + "default": 0, + "description": "Maximum number of rule groups per-tenant. 0 to disable.", + "type": "number", + "x-cli-flag": "ruler.max-rule-groups-per-tenant" + }, + "ruler_max_rules_per_rule_group": { + "default": 0, + "description": "Maximum number of rules per rule group per-tenant. 0 to disable.", + "type": "number", + "x-cli-flag": "ruler.max-rules-per-rule-group" + }, + "ruler_query_offset": { + "default": "0s", + "description": "Duration to offset all rule evaluation queries per-tenant.", + "type": "string", + "x-cli-flag": "ruler.query-offset", + "x-format": "duration" + }, + "ruler_tenant_shard_size": { + "default": 0, + "description": "The default tenant's shard size when the shuffle-sharding strategy is used by ruler. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant. 
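Putting the query_priority and query_rejection blocks together, a minimal sketch follows; the exact shape of the `priorities` and `query_attributes` entries is elided, since the schema above only types them as string lists:

```yaml
limits:
  query_priority:
    enabled: true
    default_priority: 0        # baseline; specific priorities go in `priorities`
  query_rejection:
    enabled: true              # queries matching any query_attributes entry are rejected
```
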
If the value is \u003c 1 the shard size will be a percentage of the total rulers.", + "type": "number", + "x-cli-flag": "ruler.tenant-shard-size" + }, + "rules_partial_data": { + "default": false, + "description": "Enable to allow rules to be evaluated with data from a single zone, if other zones are not available.", + "type": "boolean" + }, + "s3_sse_kms_encryption_context": { + "description": "S3 server-side encryption KMS encryption context. If unset and the key ID override is set, the encryption context will not be provided to S3. Ignored if the SSE type override is not set.", + "type": "string" + }, + "s3_sse_kms_key_id": { + "description": "S3 server-side encryption KMS Key ID. Ignored if the SSE type override is not set.", + "type": "string" + }, + "s3_sse_type": { + "description": "S3 server-side encryption type. Required to enable server-side encryption overrides for a specific tenant. If not set, the default S3 client settings are used.", + "type": "string" + }, + "store_gateway_tenant_shard_size": { + "default": 0, + "description": "The default tenant's shard size when the shuffle-sharding strategy is used. Must be set when the store-gateway sharding is enabled with the shuffle-sharding strategy. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant. If the value is \u003c 1 the shard size will be a percentage of the total store-gateways.", + "type": "number", + "x-cli-flag": "store-gateway.tenant-shard-size" + } + }, + "type": "object" + }, + "memberlist_config": { + "description": "The memberlist_config configures the Gossip memberlist.", + "properties": { + "abort_if_cluster_join_fails": { + "default": true, + "description": "If this node fails to join memberlist cluster, abort.", + "type": "boolean", + "x-cli-flag": "memberlist.abort-if-join-fails" + }, + "advertise_addr": { + "description": "Gossip address to advertise to other members in the cluster. Used for NAT traversal.", + "type": "string", + "x-cli-flag": "memberlist.advertise-addr" + }, + "advertise_port": { + "default": 7946, + "description": "Gossip port to advertise to other members in the cluster. Used for NAT traversal.", + "type": "number", + "x-cli-flag": "memberlist.advertise-port" + }, + "bind_addr": { + "default": [], + "description": "IP address to listen on for gossip messages. Multiple addresses may be specified. Defaults to 0.0.0.0", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "memberlist.bind-addr" + }, + "bind_port": { + "default": 7946, + "description": "Port to listen on for gossip messages.", + "type": "number", + "x-cli-flag": "memberlist.bind-port" + }, + "compression_enabled": { + "default": true, + "description": "Enable message compression. This can be used to reduce bandwidth usage at the cost of slightly more CPU utilization.", + "type": "boolean", + "x-cli-flag": "memberlist.compression-enabled" + }, + "dead_node_reclaim_time": { + "default": "0s", + "description": "How soon can dead node's name be reclaimed with new address. 
0 to disable.", + "type": "string", + "x-cli-flag": "memberlist.dead-node-reclaim-time", + "x-format": "duration" + }, + "gossip_interval": { + "default": "200ms", + "description": "How often to gossip.", + "type": "string", + "x-cli-flag": "memberlist.gossip-interval", + "x-format": "duration" + }, + "gossip_nodes": { + "default": 3, + "description": "How many nodes to gossip to.", + "type": "number", + "x-cli-flag": "memberlist.gossip-nodes" + }, + "gossip_to_dead_nodes_time": { + "default": "30s", + "description": "How long to keep gossiping to dead nodes, to give them chance to refute their death.", + "type": "string", + "x-cli-flag": "memberlist.gossip-to-dead-nodes-time", + "x-format": "duration" + }, + "join_members": { + "default": [], + "description": "Other cluster members to join. Can be specified multiple times. It can be an IP, hostname or an entry specified in the DNS Service Discovery format.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "memberlist.join" + }, + "leave_timeout": { + "default": "5s", + "description": "Timeout for leaving memberlist cluster.", + "type": "string", + "x-cli-flag": "memberlist.leave-timeout", + "x-format": "duration" + }, + "left_ingesters_timeout": { + "default": "5m0s", + "description": "How long to keep LEFT ingesters in the ring.", + "type": "string", + "x-cli-flag": "memberlist.left-ingesters-timeout", + "x-format": "duration" + }, + "max_join_backoff": { + "default": "1m0s", + "description": "Max backoff duration to join other cluster members.", + "type": "string", + "x-cli-flag": "memberlist.max-join-backoff", + "x-format": "duration" + }, + "max_join_retries": { + "default": 10, + "description": "Max number of retries to join other cluster members.", + "type": "number", + "x-cli-flag": "memberlist.max-join-retries" + }, + "message_history_buffer_bytes": { + "default": 0, + "description": "How much space to use for keeping received and sent messages in memory for troubleshooting (two buffers). 0 to disable.", + "type": "number", + "x-cli-flag": "memberlist.message-history-buffer-bytes" + }, + "min_join_backoff": { + "default": "1s", + "description": "Min backoff duration to join other cluster members.", + "type": "string", + "x-cli-flag": "memberlist.min-join-backoff", + "x-format": "duration" + }, + "node_name": { + "description": "Name of the node in memberlist cluster. Defaults to hostname.", + "type": "string", + "x-cli-flag": "memberlist.nodename" + }, + "packet_dial_timeout": { + "default": "5s", + "description": "Timeout used when connecting to other nodes to send packet.", + "type": "string", + "x-cli-flag": "memberlist.packet-dial-timeout", + "x-format": "duration" + }, + "packet_write_timeout": { + "default": "5s", + "description": "Timeout for writing 'packet' data.", + "type": "string", + "x-cli-flag": "memberlist.packet-write-timeout", + "x-format": "duration" + }, + "pull_push_interval": { + "default": "30s", + "description": "How often to use pull/push sync.", + "type": "string", + "x-cli-flag": "memberlist.pullpush-interval", + "x-format": "duration" + }, + "randomize_node_name": { + "default": true, + "description": "Add random suffix to the node name.", + "type": "boolean", + "x-cli-flag": "memberlist.randomize-node-name" + }, + "rejoin_interval": { + "default": "0s", + "description": "If not 0, how often to rejoin the cluster. Occasional rejoin can help to fix the cluster split issue, and is harmless otherwise. 
For example, when using only a few components as seed nodes (via -memberlist.join), it's recommended to use rejoin. If -memberlist.join points to a dynamic service that resolves to all gossiping nodes (e.g. a Kubernetes headless service), then rejoin is not needed.", + "type": "string", + "x-cli-flag": "memberlist.rejoin-interval", + "x-format": "duration" + }, + "retransmit_factor": { + "default": 4, + "description": "Multiplication factor used when sending out messages (factor * log(N+1)).", + "type": "number", + "x-cli-flag": "memberlist.retransmit-factor" + }, + "stream_timeout": { + "default": "10s", + "description": "The timeout for establishing a connection with a remote node, and for read/write operations.", + "type": "string", + "x-cli-flag": "memberlist.stream-timeout", + "x-format": "duration" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "memberlist.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "memberlist.tls-cert-path" + }, + "tls_enabled": { + "default": false, + "description": "Enable TLS on the memberlist transport layer.", + "type": "boolean", + "x-cli-flag": "memberlist.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "memberlist.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "memberlist.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "memberlist.tls-server-name" + } + }, + "type": "object" + }, + "memcached_client_config": { + "description": "The memcached_client_config configures the client used to connect to Memcached.", + "properties": { + "addresses": { + "description": "EXPERIMENTAL: Comma separated addresses list in DNS Service Discovery format: https://cortexmetrics.io/docs/configuration/arguments/#dns-service-discovery", + "type": "string", + "x-cli-flag": "frontend.memcached.addresses" + }, + "circuit_breaker_consecutive_failures": { + "default": 10, + "description": "Trip circuit-breaker after this number of consecutive dial failures (if zero then circuit-breaker is disabled).", + "type": "number", + "x-cli-flag": "frontend.memcached.circuit-breaker-consecutive-failures" + }, + "circuit_breaker_interval": { + "default": "10s", + "description": "Reset circuit-breaker counts after this long (if zero then never reset).", + "type": "string", + "x-cli-flag": "frontend.memcached.circuit-breaker-interval", + "x-format": "duration" + }, + "circuit_breaker_timeout": { + "default": "10s", + "description": "Duration circuit-breaker remains open after tripping (if zero then 60 seconds is used).", + "type": "string", + "x-cli-flag": "frontend.memcached.circuit-breaker-timeout", + "x-format": "duration" + }, + "consistent_hash": { + "default": true, + "description": "Use consistent hashing to distribute to memcache servers.", + "type": "boolean", + "x-cli-flag": "frontend.memcached.consistent-hash" + }, + "host": { + "description": "Hostname for memcached 
service to use. If empty and if addresses is unset, no memcached will be used.", + "type": "string", + "x-cli-flag": "frontend.memcached.hostname" + }, + "max_idle_conns": { + "default": 16, + "description": "Maximum number of idle connections in pool.", + "type": "number", + "x-cli-flag": "frontend.memcached.max-idle-conns" + }, + "max_item_size": { + "default": 0, + "description": "The maximum size of an item stored in memcached. Bigger items are not stored. If set to 0, no maximum size is enforced.", + "type": "number", + "x-cli-flag": "frontend.memcached.max-item-size" + }, + "service": { + "default": "memcached", + "description": "SRV service used to discover memcache servers.", + "type": "string", + "x-cli-flag": "frontend.memcached.service" + }, + "timeout": { + "default": "100ms", + "description": "Maximum time to wait before giving up on memcached requests.", + "type": "string", + "x-cli-flag": "frontend.memcached.timeout", + "x-format": "duration" + }, + "update_interval": { + "default": "1m0s", + "description": "Period with which to poll DNS for memcache servers.", + "type": "string", + "x-cli-flag": "frontend.memcached.update-interval", + "x-format": "duration" + } + }, + "type": "object" + }, + "memcached_config": { + "description": "The memcached_config block configures how data is stored in Memcached (ie. expiration).", + "properties": { + "batch_size": { + "default": 1024, + "description": "How many keys to fetch in each batch.", + "type": "number", + "x-cli-flag": "frontend.memcached.batchsize" + }, + "expiration": { + "default": "0s", + "description": "How long keys stay in the memcache.", + "type": "string", + "x-cli-flag": "frontend.memcached.expiration", + "x-format": "duration" + }, + "parallelism": { + "default": 100, + "description": "Maximum active requests to memcache.", + "type": "number", + "x-cli-flag": "frontend.memcached.parallelism" + } + }, + "type": "object" + }, + "querier_config": { + "description": "The querier_config configures the Cortex querier.", + "properties": { + "active_query_tracker_dir": { + "default": "./active-query-tracker", + "description": "Active query tracker monitors active queries, and writes them to the file in given directory. If Cortex discovers any queries in this log during startup, it will log them to the log file. Setting to empty value disables active query tracker, which also disables -querier.max-concurrent option.", + "type": "string", + "x-cli-flag": "querier.active-query-tracker-dir" + }, + "default_evaluation_interval": { + "default": "1m0s", + "description": "The default evaluation interval or step size for subqueries.", + "type": "string", + "x-cli-flag": "querier.default-evaluation-interval", + "x-format": "duration" + }, + "enable_parquet_queryable": { + "default": false, + "description": "[Experimental] If true, querier will try to query the parquet files if available.", + "type": "boolean", + "x-cli-flag": "querier.enable-parquet-queryable" + }, + "enable_promql_experimental_functions": { + "default": false, + "description": "[Experimental] If true, experimental promQL functions are enabled.", + "type": "boolean", + "x-cli-flag": "querier.enable-promql-experimental-functions" + }, + "ignore_max_query_length": { + "default": false, + "description": "If enabled, ignore max query length check at Querier select method. 
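Wiring the memcached_client_config and memcached_config blocks above into the query-frontend results cache; the `query_range.results_cache.cache` nesting is taken from the query_range_config section later in this schema, and the host and values are hypothetical:

```yaml
query_range:
  cache_results: true
  results_cache:
    cache:
      memcached:
        expiration: 24h     # how long keys stay in memcache
        batch_size: 1024    # schema default
      memcached_client:
        host: memcached.cortex.svc.cluster.local  # hypothetical hostname
        service: memcached  # SRV service name (schema default)
        timeout: 100ms      # schema default
```
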
Users can choose to ignore it since the validation can be done before Querier evaluation like at Query Frontend or Ruler.", + "type": "boolean", + "x-cli-flag": "querier.ignore-max-query-length" + }, + "ingester_label_names_with_matchers": { + "default": false, + "description": "Use LabelNames ingester RPCs with match params.", + "type": "boolean", + "x-cli-flag": "querier.ingester-label-names-with-matchers" + }, + "ingester_metadata_streaming": { + "default": true, + "description": "Deprecated (This feature will be always on after v1.18): Use streaming RPCs for metadata APIs from ingester.", + "type": "boolean", + "x-cli-flag": "querier.ingester-metadata-streaming" + }, + "ingester_query_max_attempts": { + "default": 1, + "description": "The maximum number of times we attempt fetching data from ingesters for retryable errors (ex. partial data returned).", + "type": "number", + "x-cli-flag": "querier.ingester-query-max-attempts" + }, + "lookback_delta": { + "default": "5m0s", + "description": "Time since the last sample after which a time series is considered stale and ignored by expression evaluations.", + "type": "string", + "x-cli-flag": "querier.lookback-delta", + "x-format": "duration" + }, + "max_concurrent": { + "default": 20, + "description": "The maximum number of concurrent queries.", + "type": "number", + "x-cli-flag": "querier.max-concurrent" + }, + "max_query_into_future": { + "default": "10m0s", + "description": "Maximum duration into the future you can query. 0 to disable.", + "type": "string", + "x-cli-flag": "querier.max-query-into-future", + "x-format": "duration" + }, + "max_samples": { + "default": 50000000, + "description": "Maximum number of samples a single query can load into memory.", + "type": "number", + "x-cli-flag": "querier.max-samples" + }, + "max_subquery_steps": { + "default": 0, + "description": "Max number of steps allowed for every subquery expression in query. Number of steps is calculated using subquery range / step. A value \u003e 0 enables it.", + "type": "number", + "x-cli-flag": "querier.max-subquery-steps" + }, + "parquet_queryable_default_block_store": { + "default": "parquet", + "description": "[Experimental] Parquet queryable's default block store to query. Valid options are tsdb and parquet. If it is set to tsdb, parquet queryable always fallback to store gateway.", + "type": "string", + "x-cli-flag": "querier.parquet-queryable-default-block-store" + }, + "parquet_queryable_fallback_disabled": { + "default": false, + "description": "[Experimental] Disable Parquet queryable to fallback queries to Store Gateway if the block is not available as Parquet files but available in TSDB. Setting this to true will disable the fallback and users can remove Store Gateway. But need to make sure Parquet files are created before it is queryable.", + "type": "boolean", + "x-cli-flag": "querier.parquet-queryable-fallback-disabled" + }, + "parquet_queryable_shard_cache_size": { + "default": 512, + "description": "[Experimental] Maximum size of the Parquet queryable shard cache. 0 to disable.", + "type": "number", + "x-cli-flag": "querier.parquet-queryable-shard-cache-size" + }, + "per_step_stats_enabled": { + "default": false, + "description": "Enable returning samples stats per steps in query response.", + "type": "boolean", + "x-cli-flag": "querier.per-step-stats-enabled" + }, + "query_ingesters_within": { + "default": "0s", + "description": "Maximum lookback beyond which queries are not sent to ingester. 
0 means all queries are sent to ingester.", + "type": "string", + "x-cli-flag": "querier.query-ingesters-within", + "x-format": "duration" + }, + "query_store_after": { + "default": "0s", + "description": "The time after which a metric should be queried from storage and not just ingesters. 0 means all queries are sent to store. When running the blocks storage, if this option is enabled, the time range of the query sent to the store will be manipulated to ensure the query end is not more recent than 'now - query-store-after'.", + "type": "string", + "x-cli-flag": "querier.query-store-after", + "x-format": "duration" + }, + "response_compression": { + "default": "gzip", + "description": "Use compression for metrics query API or instant and range query APIs. Supported compression 'gzip', 'snappy', 'zstd' and '' (disable compression)", + "type": "string", + "x-cli-flag": "querier.response-compression" + }, + "shuffle_sharding_ingesters_lookback_period": { + "default": "0s", + "description": "When distributor's sharding strategy is shuffle-sharding and this setting is \u003e 0, queriers fetch in-memory series from the minimum set of required ingesters, selecting only ingesters which may have received series since 'now - lookback period'. The lookback period should be greater or equal than the configured 'query store after' and 'query ingesters within'. If this setting is 0, queriers always query all ingesters (ingesters shuffle sharding on read path is disabled).", + "type": "string", + "x-cli-flag": "querier.shuffle-sharding-ingesters-lookback-period", + "x-format": "duration" + }, + "store_gateway_addresses": { + "description": "Comma separated list of store-gateway addresses in DNS Service Discovery format. This option should be set when using the blocks storage and the store-gateway sharding is disabled (when enabled, the store-gateway instances form a ring and addresses are picked from the ring).", + "type": "string", + "x-cli-flag": "querier.store-gateway-addresses" + }, + "store_gateway_client": { + "properties": { + "connect_timeout": { + "default": "5s", + "description": "The maximum amount of time to establish a connection. A value of 0 means using default gRPC client connect timeout 5s.", + "type": "string", + "x-cli-flag": "querier.store-gateway-client.connect-timeout", + "x-format": "duration" + }, + "grpc_compression": { + "description": "Use compression when sending messages. Supported values are: 'gzip', 'snappy' and '' (disable compression)", + "type": "string", + "x-cli-flag": "querier.store-gateway-client.grpc-compression" + }, + "healthcheck_config": { + "description": "EXPERIMENTAL: If enabled, gRPC clients perform health checks for each target and fail the request if the target is marked as unhealthy.", + "properties": { + "interval": { + "default": "5s", + "description": "The approximate amount of time between health checks of an individual target.", + "type": "string", + "x-cli-flag": "querier.store-gateway-client.healthcheck.interval", + "x-format": "duration" + }, + "timeout": { + "default": "1s", + "description": "The amount of time during which no response from a target means a failed health check.", + "type": "string", + "x-cli-flag": "querier.store-gateway-client.healthcheck.timeout", + "x-format": "duration" + }, + "unhealthy_threshold": { + "default": 0, + "description": "The number of consecutive failed health checks required before considering a target unhealthy. 
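The ingester/storage query boundary settings above interact, so here is a hedged sketch where recent data is served by ingesters and older data by the store; the values are hypothetical, with the store-after window kept inside the ingesters-within window so no time range falls through:

```yaml
querier:
  query_ingesters_within: 12h  # don't ask ingesters for data older than this
  query_store_after: 10h       # ask the store for anything older than this
  max_concurrent: 20           # schema default
```
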
0 means disabled.", + "type": "number", + "x-cli-flag": "querier.store-gateway-client.healthcheck.unhealthy-threshold" + } + }, + "type": "object" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "querier.store-gateway-client.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "querier.store-gateway-client.tls-cert-path" + }, + "tls_enabled": { + "default": false, + "description": "Enable TLS for gRPC client connecting to store-gateway.", + "type": "boolean", + "x-cli-flag": "querier.store-gateway-client.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "querier.store-gateway-client.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "querier.store-gateway-client.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "querier.store-gateway-client.tls-server-name" + } + }, + "type": "object" + }, + "store_gateway_consistency_check_max_attempts": { + "default": 3, + "description": "The maximum number of times we attempt fetching missing blocks from different store-gateways. If no more store-gateways are left (ie. due to lower replication factor) than we'll end the retries earlier", + "type": "number", + "x-cli-flag": "querier.store-gateway-consistency-check-max-attempts" + }, + "store_gateway_query_stats": { + "default": true, + "description": "If enabled, store gateway query stats will be logged using `info` log level.", + "type": "boolean", + "x-cli-flag": "querier.store-gateway-query-stats-enabled" + }, + "thanos_engine": { + "properties": { + "enable_x_functions": { + "default": false, + "description": "Enable xincrease, xdelta, xrate etc from Thanos engine.", + "type": "boolean", + "x-cli-flag": "querier.enable-x-functions" + }, + "enabled": { + "default": false, + "description": "Experimental. Use Thanos promql engine https://github.com/thanos-io/promql-engine rather than the Prometheus promql engine.", + "type": "boolean", + "x-cli-flag": "querier.thanos-engine" + }, + "optimizers": { + "default": "default", + "description": "Logical plan optimizers. Multiple optimizers can be provided as a comma-separated list. 
Supported values: default, all, propagate-matchers, sort-matchers, merge-selects, detect-histogram-stats", + "type": "string", + "x-cli-flag": "querier.optimizers" + } + }, + "type": "object" + }, + "timeout": { + "default": "2m0s", + "description": "The timeout for a query.", + "type": "string", + "x-cli-flag": "querier.timeout", + "x-format": "duration" + } + }, + "type": "object" + }, + "query_frontend_config": { + "description": "The query_frontend_config configures the Cortex query-frontend.", + "properties": { + "downstream_url": { + "description": "URL of downstream Prometheus.", + "type": "string", + "x-cli-flag": "frontend.downstream-url" + }, + "enabled_ruler_query_stats_log": { + "default": false, + "description": "If enabled, report the query stats log for queries coming from the ruler to evaluate rules. It only takes effect when '-ruler.frontend-address' is configured.", + "type": "boolean", + "x-cli-flag": "frontend.enabled-ruler-query-stats" + }, + "grpc_client_config": { + "properties": { + "backoff_config": { + "properties": { + "max_period": { + "default": "10s", + "description": "Maximum delay when backing off.", + "type": "string", + "x-cli-flag": "frontend.grpc-client-config.backoff-max-period", + "x-format": "duration" + }, + "max_retries": { + "default": 10, + "description": "Number of times to backoff and retry before failing.", + "type": "number", + "x-cli-flag": "frontend.grpc-client-config.backoff-retries" + }, + "min_period": { + "default": "100ms", + "description": "Minimum delay when backing off.", + "type": "string", + "x-cli-flag": "frontend.grpc-client-config.backoff-min-period", + "x-format": "duration" + } + }, + "type": "object" + }, + "backoff_on_ratelimits": { + "default": false, + "description": "Enable backoff and retry when we hit ratelimits.", + "type": "boolean", + "x-cli-flag": "frontend.grpc-client-config.backoff-on-ratelimits" + }, + "connect_timeout": { + "default": "5s", + "description": "The maximum amount of time to establish a connection. A value of 0 means using default gRPC client connect timeout 20s.", + "type": "string", + "x-cli-flag": "frontend.grpc-client-config.connect-timeout", + "x-format": "duration" + }, + "grpc_compression": { + "description": "Use compression when sending messages. Supported values are: 'gzip', 'snappy', 'snappy-block' ,'zstd' and '' (disable compression)", + "type": "string", + "x-cli-flag": "frontend.grpc-client-config.grpc-compression" + }, + "max_recv_msg_size": { + "default": 104857600, + "description": "gRPC client max receive message size (bytes).", + "type": "number", + "x-cli-flag": "frontend.grpc-client-config.grpc-max-recv-msg-size" + }, + "max_send_msg_size": { + "default": 16777216, + "description": "gRPC client max send message size (bytes).", + "type": "number", + "x-cli-flag": "frontend.grpc-client-config.grpc-max-send-msg-size" + }, + "rate_limit": { + "default": 0, + "description": "Rate limit for gRPC client; 0 means disabled.", + "type": "number", + "x-cli-flag": "frontend.grpc-client-config.grpc-client-rate-limit" + }, + "rate_limit_burst": { + "default": 0, + "description": "Rate limit burst for gRPC client.", + "type": "number", + "x-cli-flag": "frontend.grpc-client-config.grpc-client-rate-limit-burst" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. 
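Enabling the experimental Thanos PromQL engine described above is a one-block change; a sketch, where the optimizer names are the supported values listed in the schema:

```yaml
querier:
  thanos_engine:
    enabled: true                                  # use thanos-io/promql-engine
    enable_x_functions: false                      # xrate/xincrease/xdelta stay off
    optimizers: propagate-matchers,merge-selects   # or "default" / "all"
```
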
If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "frontend.grpc-client-config.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "frontend.grpc-client-config.tls-cert-path" + }, + "tls_enabled": { + "default": false, + "description": "Enable TLS in the GRPC client. This flag needs to be enabled when any other TLS flag is set. If set to false, insecure connection to gRPC server will be used.", + "type": "boolean", + "x-cli-flag": "frontend.grpc-client-config.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "frontend.grpc-client-config.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "frontend.grpc-client-config.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "frontend.grpc-client-config.tls-server-name" + } + }, + "type": "object" + }, + "instance_interface_names": { + "default": "[eth0 en0]", + "description": "Name of network interface to read address from. This address is sent to query-scheduler and querier, which uses it to send the query response back to query-frontend.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "frontend.instance-interface-names" + }, + "log_queries_longer_than": { + "default": "0s", + "description": "Log queries that are slower than the specified duration. Set to 0 to disable. Set to \u003c 0 to enable on all queries.", + "type": "string", + "x-cli-flag": "frontend.log-queries-longer-than", + "x-format": "duration" + }, + "max_body_size": { + "default": 10485760, + "description": "Max body size for downstream prometheus.", + "type": "number", + "x-cli-flag": "frontend.max-body-size" + }, + "querier_forget_delay": { + "default": "0s", + "description": "If a querier disconnects without sending notification about graceful shutdown, the query-frontend will keep the querier in the tenant's shard until the forget delay has passed. This feature is useful to reduce the blast radius when shuffle-sharding is enabled.", + "type": "string", + "x-cli-flag": "query-frontend.querier-forget-delay", + "x-format": "duration" + }, + "query_stats_enabled": { + "default": false, + "description": "True to enable query statistics tracking. 
When enabled, a message with some statistics is logged for every query.", + "type": "boolean", + "x-cli-flag": "frontend.query-stats-enabled" + }, + "retry_on_too_many_outstanding_requests": { + "default": false, + "description": "When multiple query-schedulers are available, re-enqueue queries that were rejected due to too many outstanding requests.", + "type": "boolean", + "x-cli-flag": "frontend.retry-on-too-many-outstanding-requests" + }, + "scheduler_address": { + "description": "DNS hostname used for finding query-schedulers.", + "type": "string", + "x-cli-flag": "frontend.scheduler-address" + }, + "scheduler_dns_lookup_period": { + "default": "10s", + "description": "How often to resolve the scheduler-address, in order to look for new query-scheduler instances.", + "type": "string", + "x-cli-flag": "frontend.scheduler-dns-lookup-period", + "x-format": "duration" + }, + "scheduler_worker_concurrency": { + "default": 5, + "description": "Number of concurrent workers forwarding queries to single query-scheduler.", + "type": "number", + "x-cli-flag": "frontend.scheduler-worker-concurrency" + } + }, + "type": "object" + }, + "query_range_config": { + "description": "The query_range_config configures the query splitting and caching in the Cortex query-frontend.", + "properties": { + "align_queries_with_step": { + "default": false, + "description": "Mutate incoming queries to align their start and end with their step.", + "type": "boolean", + "x-cli-flag": "querier.align-querier-with-step" + }, + "cache_results": { + "default": false, + "description": "Cache query results.", + "type": "boolean", + "x-cli-flag": "querier.cache-results" + }, + "dynamic_query_splits": { + "properties": { + "enable_dynamic_vertical_sharding": { + "default": false, + "description": "[EXPERIMENTAL] Dynamically adjust vertical shard size to maximize the total combined number of query shards and splits.", + "type": "boolean", + "x-cli-flag": "querier.enable-dynamic-vertical-sharding" + }, + "max_fetched_data_duration_per_query": { + "default": "0s", + "description": "[EXPERIMENTAL] Max total duration of data fetched from storage by all query shards, 0 disables it. Dynamically uses a multiple of split interval to maintain a total fetched duration of data lower than the value set. It takes into account additional duration fetched by matrix selectors and subqueries.", + "type": "string", + "x-cli-flag": "querier.max-fetched-data-duration-per-query", + "x-format": "duration" + }, + "max_shards_per_query": { + "default": 0, + "description": "[EXPERIMENTAL] Maximum number of shards for a query, 0 disables it. Dynamically uses a multiple of split interval to maintain a total number of shards below the set value. 
If vertical sharding is enabled for a query, the combined total number of interval splits and vertical shards is kept below this value.", + "type": "number", + "x-cli-flag": "querier.max-shards-per-query" + } + }, + "type": "object" + }, + "forward_headers_list": { + "default": [], + "description": "List of headers forwarded by the query Frontend to downstream querier.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "frontend.forward-headers-list" + }, + "max_retries": { + "default": 5, + "description": "Maximum number of retries for a single request; beyond this, the downstream error is returned.", + "type": "number", + "x-cli-flag": "querier.max-retries-per-request" + }, + "results_cache": { + "properties": { + "cache": { + "properties": { + "background": { + "properties": { + "writeback_buffer": { + "default": 10000, + "description": "How many key batches to buffer for background write-back.", + "type": "number", + "x-cli-flag": "frontend.background.write-back-buffer" + }, + "writeback_goroutines": { + "default": 10, + "description": "At what concurrency to write back to cache.", + "type": "number", + "x-cli-flag": "frontend.background.write-back-concurrency" + } + }, + "type": "object" + }, + "default_validity": { + "default": "0s", + "description": "The default validity of entries for caches unless overridden.", + "type": "string", + "x-cli-flag": "frontend.default-validity", + "x-format": "duration" + }, + "enable_fifocache": { + "default": false, + "description": "Enable in-memory cache.", + "type": "boolean", + "x-cli-flag": "frontend.cache.enable-fifocache" + }, + "fifocache": { + "$ref": "#/definitions/fifo_cache_config" + }, + "memcached": { + "$ref": "#/definitions/memcached_config" + }, + "memcached_client": { + "$ref": "#/definitions/memcached_client_config" + }, + "redis": { + "$ref": "#/definitions/redis_config" + } + }, + "type": "object" + }, + "cache_queryable_samples_stats": { + "default": false, + "description": "Cache Statistics queryable samples on results cache.", + "type": "boolean", + "x-cli-flag": "frontend.cache-queryable-samples-stats" + }, + "compression": { + "description": "Use compression in results cache. Supported values are: 'snappy' and '' (disable compression).", + "type": "string", + "x-cli-flag": "frontend.compression" + } + }, + "type": "object" + }, + "split_queries_by_interval": { + "default": "0s", + "description": "Split queries by an interval and execute in parallel, 0 disables it. You should use a multiple of 24 hours (same as the storage bucketing scheme), to avoid queriers downloading and processing the same chunks. This also determines how cache keys are chosen when result caching is enabled", + "type": "string", + "x-cli-flag": "querier.split-queries-by-interval", + "x-format": "duration" + } + }, + "type": "object" + }, + "redis_config": { + "description": "The redis_config configures the Redis backend cache.", + "properties": { + "db": { + "default": 0, + "description": "Database index.", + "type": "number", + "x-cli-flag": "frontend.redis.db" + }, + "endpoint": { + "description": "Redis Server endpoint to use for caching. A comma-separated list of endpoints for Redis Cluster or Redis Sentinel. 
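A hedged sketch combining the query-splitting and retry knobs above; 24h keeps splits aligned with the storage bucketing, as the description recommends:

```yaml
query_range:
  split_queries_by_interval: 24h  # also determines result-cache keying
  align_queries_with_step: true
  max_retries: 5                  # schema default
```
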
If empty, no redis will be used.", + "type": "string", + "x-cli-flag": "frontend.redis.endpoint" + }, + "expiration": { + "default": "0s", + "description": "How long keys stay in the redis.", + "type": "string", + "x-cli-flag": "frontend.redis.expiration", + "x-format": "duration" + }, + "idle_timeout": { + "default": "0s", + "description": "Close connections after remaining idle for this duration. If the value is zero, then idle connections are not closed.", + "type": "string", + "x-cli-flag": "frontend.redis.idle-timeout", + "x-format": "duration" + }, + "master_name": { + "description": "Redis Sentinel master name. An empty string for Redis Server or Redis Cluster.", + "type": "string", + "x-cli-flag": "frontend.redis.master-name" + }, + "max_connection_age": { + "default": "0s", + "description": "Close connections older than this duration. If the value is zero, then the pool does not close connections based on age.", + "type": "string", + "x-cli-flag": "frontend.redis.max-connection-age", + "x-format": "duration" + }, + "password": { + "description": "Password to use when connecting to redis.", + "type": "string", + "x-cli-flag": "frontend.redis.password" + }, + "pool_size": { + "default": 0, + "description": "Maximum number of connections in the pool.", + "type": "number", + "x-cli-flag": "frontend.redis.pool-size" + }, + "timeout": { + "default": "500ms", + "description": "Maximum time to wait before giving up on redis requests.", + "type": "string", + "x-cli-flag": "frontend.redis.timeout", + "x-format": "duration" + }, + "tls_enabled": { + "default": false, + "description": "Enable connecting to redis with TLS.", + "type": "boolean", + "x-cli-flag": "frontend.redis.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "frontend.redis.tls-insecure-skip-verify" + } + }, + "type": "object" + }, + "ruler_config": { + "description": "The ruler_config configures the Cortex ruler.", + "properties": { + "alertmanager_client": { + "properties": { + "basic_auth_password": { + "description": "HTTP Basic authentication password. It overrides the password set in the URL (if any).", + "type": "string", + "x-cli-flag": "ruler.alertmanager-client.basic-auth-password" + }, + "basic_auth_username": { + "description": "HTTP Basic authentication username. It overrides the username set in the URL (if any).", + "type": "string", + "x-cli-flag": "ruler.alertmanager-client.basic-auth-username" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "ruler.alertmanager-client.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "ruler.alertmanager-client.tls-cert-path" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "ruler.alertmanager-client.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. 
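If Redis is preferred over memcached for the results cache, the redis_config block above plugs into the same `cache` slot; a sketch with a hypothetical endpoint:

```yaml
query_range:
  results_cache:
    cache:
      redis:
        endpoint: redis.cortex.svc.cluster.local:6379  # hypothetical
        expiration: 24h
        timeout: 500ms   # schema default
```
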
Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "ruler.alertmanager-client.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "ruler.alertmanager-client.tls-server-name" + } + }, + "type": "object" + }, + "alertmanager_refresh_interval": { + "default": "1m0s", + "description": "How long to wait between refreshing DNS resolutions of Alertmanager hosts.", + "type": "string", + "x-cli-flag": "ruler.alertmanager-refresh-interval", + "x-format": "duration" + }, + "alertmanager_url": { + "description": "Comma-separated list of URL(s) of the Alertmanager(s) to send notifications to. Each Alertmanager URL is treated as a separate group in the configuration. Multiple Alertmanagers in HA per group can be supported by using DNS resolution via -ruler.alertmanager-discovery.", + "type": "string", + "x-cli-flag": "ruler.alertmanager-url" + }, + "api_deduplicate_rules": { + "default": false, + "description": "EXPERIMENTAL: Remove duplicate rules in the prometheus rules and alerts API response. If there are duplicate rules the rule with the latest evaluation timestamp will be kept.", + "type": "boolean", + "x-cli-flag": "experimental.ruler.api-deduplicate-rules" + }, + "concurrent_evals_enabled": { + "default": false, + "description": "If enabled, rules from a single rule group can be evaluated concurrently if there is no dependency between each other. Max concurrency for each rule group is controlled via ruler.max-concurrent-evals flag.", + "type": "boolean", + "x-cli-flag": "ruler.concurrent-evals-enabled" + }, + "disable_rule_group_label": { + "default": false, + "description": "Disable the rule_group label on exported metrics", + "type": "boolean", + "x-cli-flag": "ruler.disable-rule-group-label" + }, + "disabled_tenants": { + "description": "Comma separated list of tenants whose rules this ruler cannot evaluate. If specified, a ruler that would normally pick the specified tenant(s) for processing will ignore them instead. Subject to sharding.", + "type": "string", + "x-cli-flag": "ruler.disabled-tenants" + }, + "enable_alertmanager_discovery": { + "default": false, + "description": "Use DNS SRV records to discover Alertmanager hosts.", + "type": "boolean", + "x-cli-flag": "ruler.alertmanager-discovery" + }, + "enable_api": { + "default": false, + "description": "Enable the ruler api", + "type": "boolean", + "x-cli-flag": "experimental.ruler.enable-api" + }, + "enable_ha_evaluation": { + "default": false, + "description": "Enable high availability", + "type": "boolean", + "x-cli-flag": "ruler.enable-ha-evaluation" + }, + "enable_sharding": { + "default": false, + "description": "Distribute rule evaluation using ring backend", + "type": "boolean", + "x-cli-flag": "ruler.enable-sharding" + }, + "enabled_tenants": { + "description": "Comma separated list of tenants whose rules this ruler can evaluate. If specified, only these tenants will be handled by ruler, otherwise this ruler can process rules from all tenants. 
Subject to sharding.", + "type": "string", + "x-cli-flag": "ruler.enabled-tenants" + }, + "evaluation_interval": { + "default": "1m0s", + "description": "How frequently to evaluate rules", + "type": "string", + "x-cli-flag": "ruler.evaluation-interval", + "x-format": "duration" + }, + "external_labels": { + "additionalProperties": true, + "default": [], + "description": "Labels to add to all alerts.", + "type": "object" + }, + "external_url": { + "description": "URL of alerts return path.", + "format": "uri", + "type": "string", + "x-cli-flag": "ruler.external.url" + }, + "flush_period": { + "default": "1m0s", + "description": "Period with which to attempt to flush rule groups.", + "type": "string", + "x-cli-flag": "ruler.flush-period", + "x-format": "duration" + }, + "for_grace_period": { + "default": "10m0s", + "description": "Minimum duration between alert and restored \"for\" state. This is maintained only for alerts with configured \"for\" time greater than grace period.", + "type": "string", + "x-cli-flag": "ruler.for-grace-period", + "x-format": "duration" + }, + "for_outage_tolerance": { + "default": "1h0m0s", + "description": "Max time to tolerate outage for restoring \"for\" state of alert.", + "type": "string", + "x-cli-flag": "ruler.for-outage-tolerance", + "x-format": "duration" + }, + "frontend_address": { + "description": "[Experimental] GRPC listen address of the Query Frontend, in host:port format. If set, Ruler queries to Query Frontends via gRPC. If not set, ruler queries to Ingesters directly.", + "type": "string", + "x-cli-flag": "ruler.frontend-address" + }, + "frontend_client": { + "properties": { + "backoff_config": { + "properties": { + "max_period": { + "default": "10s", + "description": "Maximum delay when backing off.", + "type": "string", + "x-cli-flag": "ruler.frontendClient.backoff-max-period", + "x-format": "duration" + }, + "max_retries": { + "default": 10, + "description": "Number of times to backoff and retry before failing.", + "type": "number", + "x-cli-flag": "ruler.frontendClient.backoff-retries" + }, + "min_period": { + "default": "100ms", + "description": "Minimum delay when backing off.", + "type": "string", + "x-cli-flag": "ruler.frontendClient.backoff-min-period", + "x-format": "duration" + } + }, + "type": "object" + }, + "backoff_on_ratelimits": { + "default": false, + "description": "Enable backoff and retry when we hit ratelimits.", + "type": "boolean", + "x-cli-flag": "ruler.frontendClient.backoff-on-ratelimits" + }, + "connect_timeout": { + "default": "5s", + "description": "The maximum amount of time to establish a connection. A value of 0 means using default gRPC client connect timeout 20s.", + "type": "string", + "x-cli-flag": "ruler.frontendClient.connect-timeout", + "x-format": "duration" + }, + "grpc_compression": { + "description": "Use compression when sending messages. 
Supported values are: 'gzip', 'snappy', 'snappy-block', 'zstd' and '' (disable compression)", + "type": "string", + "x-cli-flag": "ruler.frontendClient.grpc-compression" + }, + "max_recv_msg_size": { + "default": 104857600, + "description": "gRPC client max receive message size (bytes).", + "type": "number", + "x-cli-flag": "ruler.frontendClient.grpc-max-recv-msg-size" + }, + "max_send_msg_size": { + "default": 16777216, + "description": "gRPC client max send message size (bytes).", + "type": "number", + "x-cli-flag": "ruler.frontendClient.grpc-max-send-msg-size" + }, + "rate_limit": { + "default": 0, + "description": "Rate limit for gRPC client; 0 means disabled.", + "type": "number", + "x-cli-flag": "ruler.frontendClient.grpc-client-rate-limit" + }, + "rate_limit_burst": { + "default": 0, + "description": "Rate limit burst for gRPC client.", + "type": "number", + "x-cli-flag": "ruler.frontendClient.grpc-client-rate-limit-burst" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "ruler.frontendClient.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "ruler.frontendClient.tls-cert-path" + }, + "tls_enabled": { + "default": false, + "description": "Enable TLS in the gRPC client. This flag needs to be enabled when any other TLS flag is set. If set to false, an insecure connection to the gRPC server will be used.", + "type": "boolean", + "x-cli-flag": "ruler.frontendClient.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "ruler.frontendClient.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "ruler.frontendClient.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "ruler.frontendClient.tls-server-name" + } + }, + "type": "object" + }, + "liveness_check_timeout": { + "default": "1s", + "description": "Timeout duration for non-primary rulers during liveness checks. If the check times out, the non-primary ruler will evaluate the rule group.
Applicable when ruler.enable-ha-evaluation is true.", + "type": "string", + "x-cli-flag": "ruler.liveness-check-timeout", + "x-format": "duration" + }, + "max_concurrent_evals": { + "default": 1, + "description": "Max concurrency for a single rule group to evaluate independent rules.", + "type": "number", + "x-cli-flag": "ruler.max-concurrent-evals" + }, + "notification_queue_capacity": { + "default": 10000, + "description": "Capacity of the queue for notifications to be sent to the Alertmanager.", + "type": "number", + "x-cli-flag": "ruler.notification-queue-capacity" + }, + "notification_timeout": { + "default": "10s", + "description": "HTTP timeout duration when sending notifications to the Alertmanager.", + "type": "string", + "x-cli-flag": "ruler.notification-timeout", + "x-format": "duration" + }, + "poll_interval": { + "default": "1m0s", + "description": "How frequently to poll for rule changes.", + "type": "string", + "x-cli-flag": "ruler.poll-interval", + "x-format": "duration" + }, + "query_response_format": { + "default": "protobuf", + "description": "[Experimental] Query response format used to get query results from the Query Frontend during rule evaluation. It will only take effect when `-ruler.frontend-address` is configured. Supported values: json, protobuf", + "type": "string", + "x-cli-flag": "ruler.query-response-format" + }, + "query_stats_enabled": { + "default": false, + "description": "Report query statistics for completed ruler queries as a per-user metric and as an info-level log message.", + "type": "boolean", + "x-cli-flag": "ruler.query-stats-enabled" + }, + "resend_delay": { + "default": "1m0s", + "description": "Minimum amount of time to wait before resending an alert to Alertmanager.", + "type": "string", + "x-cli-flag": "ruler.resend-delay", + "x-format": "duration" + }, + "ring": { + "properties": { + "detailed_metrics_enabled": { + "default": true, + "description": "Set to true to enable ring detailed metrics. These metrics provide detailed information, such as token count and ownership per tenant. Disabling them can significantly decrease the number of metrics emitted.", + "type": "boolean", + "x-cli-flag": "ruler.ring.detailed-metrics-enabled" + }, + "final_sleep": { + "default": "0s", + "description": "The time to sleep when the ruler is shutting down. Needs to be close to or larger than the KV store information propagation delay.", + "type": "string", + "x-cli-flag": "ruler.ring.final-sleep", + "x-format": "duration" + }, + "heartbeat_period": { + "default": "5s", + "description": "Period at which to heartbeat to the ring. 0 = disabled.", + "type": "string", + "x-cli-flag": "ruler.ring.heartbeat-period", + "x-format": "duration" + }, + "heartbeat_timeout": { + "default": "1m0s", + "description": "The heartbeat timeout after which rulers are considered unhealthy within the ring.
0 = never (timeout disabled).", + "type": "string", + "x-cli-flag": "ruler.ring.heartbeat-timeout", + "x-format": "duration" + }, + "instance_interface_names": { + "default": "[eth0 en0]", + "description": "Name of network interface to read address from.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "ruler.ring.instance-interface-names" + }, + "keep_instance_in_the_ring_on_shutdown": { + "default": false, + "description": "Keep instance in the ring on shutdown.", + "type": "boolean", + "x-cli-flag": "ruler.ring.keep-instance-in-the-ring-on-shutdown" + }, + "kvstore": { + "properties": { + "consul": { + "$ref": "#/definitions/consul_config" + }, + "dynamodb": { + "properties": { + "max_cas_retries": { + "default": 10, + "description": "Maximum number of retries for DDB KV CAS.", + "type": "number", + "x-cli-flag": "ruler.ring.dynamodb.max-cas-retries" + }, + "puller_sync_time": { + "default": "1m0s", + "description": "Time to refresh the local ring with information from DynamoDB.", + "type": "string", + "x-cli-flag": "ruler.ring.dynamodb.puller-sync-time", + "x-format": "duration" + }, + "region": { + "description": "Region to access DynamoDB.", + "type": "string", + "x-cli-flag": "ruler.ring.dynamodb.region" + }, + "table_name": { + "description": "Table name to use on DynamoDB.", + "type": "string", + "x-cli-flag": "ruler.ring.dynamodb.table-name" + }, + "timeout": { + "default": "2m0s", + "description": "Timeout of dynamoDbClient requests. Default is 2m.", + "type": "string", + "x-cli-flag": "ruler.ring.dynamodb.timeout", + "x-format": "duration" + }, + "ttl": { + "default": "0s", + "description": "Time to expire items on DynamoDB.", + "type": "string", + "x-cli-flag": "ruler.ring.dynamodb.ttl-time", + "x-format": "duration" + } + }, + "type": "object" + }, + "etcd": { + "$ref": "#/definitions/etcd_config" + }, + "multi": { + "properties": { + "mirror_enabled": { + "default": false, + "description": "Mirror writes to secondary store.", + "type": "boolean", + "x-cli-flag": "ruler.ring.multi.mirror-enabled" + }, + "mirror_timeout": { + "default": "2s", + "description": "Timeout for storing value to secondary store.", + "type": "string", + "x-cli-flag": "ruler.ring.multi.mirror-timeout", + "x-format": "duration" + }, + "primary": { + "description": "Primary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "ruler.ring.multi.primary" + }, + "secondary": { + "description": "Secondary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "ruler.ring.multi.secondary" + } + }, + "type": "object" + }, + "prefix": { + "default": "rulers/", + "description": "The prefix for the keys in the store. Should end with a /.", + "type": "string", + "x-cli-flag": "ruler.ring.prefix" + }, + "store": { + "default": "consul", + "description": "Backend storage to use for the ring. Supported values are: consul, dynamodb, etcd, inmemory, memberlist, multi.", + "type": "string", + "x-cli-flag": "ruler.ring.store" + } + }, + "type": "object" + }, + "num_tokens": { + "default": 128, + "description": "Number of tokens for each ruler.", + "type": "number", + "x-cli-flag": "ruler.ring.num-tokens" + }, + "replication_factor": { + "default": 1, + "description": "EXPERIMENTAL: The replication factor to use when loading rule groups for API HA.", + "type": "number", + "x-cli-flag": "ruler.ring.replication-factor" + }, + "tokens_file_path": { + "description": "EXPERIMENTAL: File path where tokens are stored.
If empty, tokens are not stored at shutdown and restored at startup.", + "type": "string", + "x-cli-flag": "ruler.ring.tokens-file-path" + }, + "zone_awareness_enabled": { + "default": false, + "description": "EXPERIMENTAL: True to enable zone-awareness and load rule groups across different availability zones for API HA.", + "type": "boolean", + "x-cli-flag": "ruler.ring.zone-awareness-enabled" + } + }, + "type": "object" + }, + "rule_path": { + "default": "/rules", + "description": "File path to store temporary rule files for the Prometheus rule managers.", + "type": "string", + "x-cli-flag": "ruler.rule-path" + }, + "ruler_client": { + "properties": { + "backoff_config": { + "properties": { + "max_period": { + "default": "10s", + "description": "Maximum delay when backing off.", + "type": "string", + "x-cli-flag": "ruler.client.backoff-max-period", + "x-format": "duration" + }, + "max_retries": { + "default": 10, + "description": "Number of times to back off and retry before failing.", + "type": "number", + "x-cli-flag": "ruler.client.backoff-retries" + }, + "min_period": { + "default": "100ms", + "description": "Minimum delay when backing off.", + "type": "string", + "x-cli-flag": "ruler.client.backoff-min-period", + "x-format": "duration" + } + }, + "type": "object" + }, + "backoff_on_ratelimits": { + "default": false, + "description": "Enable backoff and retry when rate limits are hit.", + "type": "boolean", + "x-cli-flag": "ruler.client.backoff-on-ratelimits" + }, + "connect_timeout": { + "default": "5s", + "description": "The maximum amount of time to establish a connection. A value of 0 means the default gRPC client connect timeout of 20s is used.", + "type": "string", + "x-cli-flag": "ruler.client.connect-timeout", + "x-format": "duration" + }, + "grpc_compression": { + "description": "Use compression when sending messages. Supported values are: 'gzip', 'snappy', 'snappy-block', 'zstd' and '' (disable compression)", + "type": "string", + "x-cli-flag": "ruler.client.grpc-compression" + }, + "max_recv_msg_size": { + "default": 104857600, + "description": "gRPC client max receive message size (bytes).", + "type": "number", + "x-cli-flag": "ruler.client.grpc-max-recv-msg-size" + }, + "max_send_msg_size": { + "default": 16777216, + "description": "gRPC client max send message size (bytes).", + "type": "number", + "x-cli-flag": "ruler.client.grpc-max-send-msg-size" + }, + "rate_limit": { + "default": 0, + "description": "Rate limit for gRPC client; 0 means disabled.", + "type": "number", + "x-cli-flag": "ruler.client.grpc-client-rate-limit" + }, + "rate_limit_burst": { + "default": 0, + "description": "Rate limit burst for gRPC client.", + "type": "number", + "x-cli-flag": "ruler.client.grpc-client-rate-limit-burst" + }, + "remote_timeout": { + "default": "2m0s", + "description": "Timeout for downstream rulers.", + "type": "string", + "x-cli-flag": "ruler.client.remote-timeout", + "x-format": "duration" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "ruler.client.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "ruler.client.tls-cert-path" + }, + "tls_enabled": { + "default": false, + "description": "Enable TLS in the gRPC client.
This flag needs to be enabled when any other TLS flag is set. If set to false, an insecure connection to the gRPC server will be used.", + "type": "boolean", + "x-cli-flag": "ruler.client.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "ruler.client.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "ruler.client.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "ruler.client.tls-server-name" + } + }, + "type": "object" + }, + "search_pending_for": { + "default": "5m0s", + "description": "Time to spend searching for a pending ruler when shutting down.", + "type": "string", + "x-cli-flag": "ruler.search-pending-for", + "x-format": "duration" + }, + "sharding_strategy": { + "default": "default", + "description": "The sharding strategy to use. Supported values are: default, shuffle-sharding.", + "type": "string", + "x-cli-flag": "ruler.sharding-strategy" + }, + "thanos_engine": { + "properties": { + "enable_x_functions": { + "default": false, + "description": "Enable xincrease, xdelta, xrate, etc. from the Thanos engine.", + "type": "boolean", + "x-cli-flag": "ruler.enable-x-functions" + }, + "enabled": { + "default": false, + "description": "Experimental. Use the Thanos PromQL engine https://github.com/thanos-io/promql-engine rather than the Prometheus PromQL engine.", + "type": "boolean", + "x-cli-flag": "ruler.thanos-engine" + }, + "optimizers": { + "default": "default", + "description": "Logical plan optimizers. Multiple optimizers can be provided as a comma-separated list. Supported values: default, all, propagate-matchers, sort-matchers, merge-selects, detect-histogram-stats", + "type": "string", + "x-cli-flag": "ruler.optimizers" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "ruler_storage_config": { + "description": "The ruler_storage_config configures the Cortex ruler storage backend.", + "properties": { + "azure": { + "properties": { + "account_key": { + "description": "Azure storage account key", + "type": "string", + "x-cli-flag": "ruler-storage.azure.account-key" + }, + "account_name": { + "description": "Azure storage account name", + "type": "string", + "x-cli-flag": "ruler-storage.azure.account-name" + }, + "connection_string": { + "description": "The values of `account-name` and `endpoint-suffix` will not be ignored if `connection-string` is set. Use this method over `account-key` if you need to authenticate via a SAS token or if you use the Azurite emulator.", + "type": "string", + "x-cli-flag": "ruler-storage.azure.connection-string" + }, + "container_name": { + "description": "Azure storage container name", + "type": "string", + "x-cli-flag": "ruler-storage.azure.container-name" + }, + "endpoint_suffix": { + "description": "Azure storage endpoint suffix without scheme. The account name will be prefixed to this value to create the FQDN.", + "type": "string", + "x-cli-flag": "ruler-storage.azure.endpoint-suffix" + }, + "http": { + "properties": { + "expect_continue_timeout": { + "default": "1s", + "description": "The time to wait for a server's first response headers after fully writing the request headers if the request has an Expect header.
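For orientation, the ruler definition above maps to YAML along these lines — a minimal sketch assembled from the fields and defaults documented in this schema (the Alertmanager address and the kvstore choice are illustrative assumptions, not defaults):

    ruler:
      alertmanager_url: http://alertmanager:9093/alertmanager   # illustrative address, not a default
      evaluation_interval: 1m0s   # default
      poll_interval: 1m0s         # default
      rule_path: /rules           # default
      enable_sharding: true       # distribute rule evaluation via the ring backend
      ring:
        kvstore:
          store: consul           # default; etcd, memberlist, etc. are also supported
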
0 to send the request body immediately.", + "type": "string", + "x-cli-flag": "ruler-storage.azure.expect-continue-timeout", + "x-format": "duration" + }, + "idle_conn_timeout": { + "default": "1m30s", + "description": "The time an idle connection will remain idle before closing.", + "type": "string", + "x-cli-flag": "ruler-storage.azure.http.idle-conn-timeout", + "x-format": "duration" + }, + "insecure_skip_verify": { + "default": false, + "description": "If the client connects via HTTPS and this option is enabled, the client will accept any certificate and hostname.", + "type": "boolean", + "x-cli-flag": "ruler-storage.azure.http.insecure-skip-verify" + }, + "max_connections_per_host": { + "default": 0, + "description": "Maximum number of connections per host. 0 means no limit.", + "type": "number", + "x-cli-flag": "ruler-storage.azure.max-connections-per-host" + }, + "max_idle_connections": { + "default": 100, + "description": "Maximum number of idle (keep-alive) connections across all hosts. 0 means no limit.", + "type": "number", + "x-cli-flag": "ruler-storage.azure.max-idle-connections" + }, + "max_idle_connections_per_host": { + "default": 100, + "description": "Maximum number of idle (keep-alive) connections to keep per-host. If 0, a built-in default value is used.", + "type": "number", + "x-cli-flag": "ruler-storage.azure.max-idle-connections-per-host" + }, + "response_header_timeout": { + "default": "2m0s", + "description": "The amount of time the client will wait for a server's response headers.", + "type": "string", + "x-cli-flag": "ruler-storage.azure.http.response-header-timeout", + "x-format": "duration" + }, + "tls_handshake_timeout": { + "default": "10s", + "description": "Maximum time to wait for a TLS handshake. 0 means no limit.", + "type": "string", + "x-cli-flag": "ruler-storage.azure.tls-handshake-timeout", + "x-format": "duration" + } + }, + "type": "object" + }, + "max_retries": { + "default": 20, + "description": "Number of retries for recoverable errors.", + "type": "number", + "x-cli-flag": "ruler-storage.azure.max-retries" + }, + "msi_resource": { + "description": "Deprecated: Azure storage MSI resource. It will be set automatically by Azure SDK.", + "type": "string", + "x-cli-flag": "ruler-storage.azure.msi-resource" + }, + "user_assigned_id": { + "description": "Azure storage MSI resource managed identity client ID. If not supplied, the default Azure credential will be used. Set it to empty if you need to authenticate via Azure Workload Identity.", + "type": "string", + "x-cli-flag": "ruler-storage.azure.user-assigned-id" + } + }, + "type": "object" + }, + "backend": { + "default": "s3", + "description": "Backend storage to use. Supported backends are: s3, gcs, azure, swift, filesystem, configdb, local.", + "type": "string", + "x-cli-flag": "ruler-storage.backend" + }, + "configdb": { + "$ref": "#/definitions/configstore_config" + }, + "filesystem": { + "properties": { + "dir": { + "description": "Local filesystem storage directory.", + "type": "string", + "x-cli-flag": "ruler-storage.filesystem.dir" + } + }, + "type": "object" + }, + "gcs": { + "properties": { + "bucket_name": { + "description": "GCS bucket name", + "type": "string", + "x-cli-flag": "ruler-storage.gcs.bucket-name" + }, + "service_account": { + "description": "JSON representing either a Google Developers Console client_credentials.json file or a Google Developers service account key file.
If empty, fallback to Google default logic.", + "type": "string", + "x-cli-flag": "ruler-storage.gcs.service-account" + } + }, + "type": "object" + }, + "local": { + "properties": { + "directory": { + "description": "Directory to scan for rules.", + "type": "string", + "x-cli-flag": "ruler-storage.local.directory" + } + }, + "type": "object" + }, + "s3": { + "properties": { + "access_key_id": { + "description": "S3 access key ID", + "type": "string", + "x-cli-flag": "ruler-storage.s3.access-key-id" + }, + "bucket_lookup_type": { + "default": "auto", + "description": "The S3 bucket lookup style. Supported values are: auto, virtual-hosted, path.", + "type": "string", + "x-cli-flag": "ruler-storage.s3.bucket-lookup-type" + }, + "bucket_name": { + "description": "S3 bucket name", + "type": "string", + "x-cli-flag": "ruler-storage.s3.bucket-name" + }, + "disable_dualstack": { + "default": false, + "description": "If enabled, S3 endpoint will use the non-dualstack variant.", + "type": "boolean", + "x-cli-flag": "ruler-storage.s3.disable-dualstack" + }, + "endpoint": { + "description": "The S3 bucket endpoint. It could be an AWS S3 endpoint listed at https://docs.aws.amazon.com/general/latest/gr/s3.html or the address of an S3-compatible service in hostname:port format.", + "type": "string", + "x-cli-flag": "ruler-storage.s3.endpoint" + }, + "http": { + "properties": { + "expect_continue_timeout": { + "default": "1s", + "description": "The time to wait for a server's first response headers after fully writing the request headers if the request has an Expect header. 0 to send the request body immediately.", + "type": "string", + "x-cli-flag": "ruler-storage.s3.expect-continue-timeout", + "x-format": "duration" + }, + "idle_conn_timeout": { + "default": "1m30s", + "description": "The time an idle connection will remain idle before closing.", + "type": "string", + "x-cli-flag": "ruler-storage.s3.http.idle-conn-timeout", + "x-format": "duration" + }, + "insecure_skip_verify": { + "default": false, + "description": "If the client connects via HTTPS and this option is enabled, the client will accept any certificate and hostname.", + "type": "boolean", + "x-cli-flag": "ruler-storage.s3.http.insecure-skip-verify" + }, + "max_connections_per_host": { + "default": 0, + "description": "Maximum number of connections per host. 0 means no limit.", + "type": "number", + "x-cli-flag": "ruler-storage.s3.max-connections-per-host" + }, + "max_idle_connections": { + "default": 100, + "description": "Maximum number of idle (keep-alive) connections across all hosts. 0 means no limit.", + "type": "number", + "x-cli-flag": "ruler-storage.s3.max-idle-connections" + }, + "max_idle_connections_per_host": { + "default": 100, + "description": "Maximum number of idle (keep-alive) connections to keep per-host. If 0, a built-in default value is used.", + "type": "number", + "x-cli-flag": "ruler-storage.s3.max-idle-connections-per-host" + }, + "response_header_timeout": { + "default": "2m0s", + "description": "The amount of time the client will wait for a server's response headers.", + "type": "string", + "x-cli-flag": "ruler-storage.s3.http.response-header-timeout", + "x-format": "duration" + }, + "tls_handshake_timeout": { + "default": "10s", + "description": "Maximum time to wait for a TLS handshake.
0 means no limit.", + "type": "string", + "x-cli-flag": "ruler-storage.s3.tls-handshake-timeout", + "x-format": "duration" + } + }, + "type": "object" + }, + "insecure": { + "default": false, + "description": "If enabled, use http:// for the S3 endpoint instead of https://. This could be useful in local dev/test environments while using an S3-compatible backend storage, like Minio.", + "type": "boolean", + "x-cli-flag": "ruler-storage.s3.insecure" + }, + "list_objects_version": { + "description": "The list API version. Supported values are: v1, v2, and ''.", + "type": "string", + "x-cli-flag": "ruler-storage.s3.list-objects-version" + }, + "region": { + "description": "S3 region. If unset, the client will issue an S3 GetBucketLocation API call to autodetect it.", + "type": "string", + "x-cli-flag": "ruler-storage.s3.region" + }, + "secret_access_key": { + "description": "S3 secret access key", + "type": "string", + "x-cli-flag": "ruler-storage.s3.secret-access-key" + }, + "send_content_md5": { + "default": true, + "description": "If true, attach an MD5 checksum when uploading objects, and S3 uses the MD5 checksum algorithm to verify the provided digest. If false, use the CRC32C algorithm instead.", + "type": "boolean", + "x-cli-flag": "ruler-storage.s3.send-content-md5" + }, + "signature_version": { + "default": "v4", + "description": "The signature version to use for authenticating against S3. Supported values are: v4, v2.", + "type": "string", + "x-cli-flag": "ruler-storage.s3.signature-version" + }, + "sse": { + "$ref": "#/definitions/s3_sse_config" + } + }, + "type": "object" + }, + "swift": { + "properties": { + "application_credential_id": { + "description": "OpenStack Swift application credential ID.", + "type": "string", + "x-cli-flag": "ruler-storage.swift.application-credential-id" + }, + "application_credential_name": { + "description": "OpenStack Swift application credential name.", + "type": "string", + "x-cli-flag": "ruler-storage.swift.application-credential-name" + }, + "application_credential_secret": { + "description": "OpenStack Swift application credential secret.", + "type": "string", + "x-cli-flag": "ruler-storage.swift.application-credential-secret" + }, + "auth_url": { + "description": "OpenStack Swift authentication URL.", + "type": "string", + "x-cli-flag": "ruler-storage.swift.auth-url" + }, + "auth_version": { + "default": 0, + "description": "OpenStack Swift authentication API version.
0 to autodetect.", + "type": "number", + "x-cli-flag": "ruler-storage.swift.auth-version" + }, + "connect_timeout": { + "default": "10s", + "description": "Time after which a connection attempt is aborted.", + "type": "string", + "x-cli-flag": "ruler-storage.swift.connect-timeout", + "x-format": "duration" + }, + "container_name": { + "description": "Name of the OpenStack Swift container to put chunks in.", + "type": "string", + "x-cli-flag": "ruler-storage.swift.container-name" + }, + "domain_id": { + "description": "OpenStack Swift user's domain ID.", + "type": "string", + "x-cli-flag": "ruler-storage.swift.domain-id" + }, + "domain_name": { + "description": "OpenStack Swift user's domain name.", + "type": "string", + "x-cli-flag": "ruler-storage.swift.domain-name" + }, + "max_retries": { + "default": 3, + "description": "Max retries on request errors.", + "type": "number", + "x-cli-flag": "ruler-storage.swift.max-retries" + }, + "password": { + "description": "OpenStack Swift API key.", + "type": "string", + "x-cli-flag": "ruler-storage.swift.password" + }, + "project_domain_id": { + "description": "ID of the OpenStack Swift project's domain (v3 auth only), only needed if it differs from the user domain.", + "type": "string", + "x-cli-flag": "ruler-storage.swift.project-domain-id" + }, + "project_domain_name": { + "description": "Name of the OpenStack Swift project's domain (v3 auth only), only needed if it differs from the user domain.", + "type": "string", + "x-cli-flag": "ruler-storage.swift.project-domain-name" + }, + "project_id": { + "description": "OpenStack Swift project ID (v2,v3 auth only).", + "type": "string", + "x-cli-flag": "ruler-storage.swift.project-id" + }, + "project_name": { + "description": "OpenStack Swift project name (v2,v3 auth only).", + "type": "string", + "x-cli-flag": "ruler-storage.swift.project-name" + }, + "region_name": { + "description": "OpenStack Swift Region to use (v2,v3 auth only).", + "type": "string", + "x-cli-flag": "ruler-storage.swift.region-name" + }, + "request_timeout": { + "default": "5s", + "description": "Time after which an idle request is aborted.
The timeout watchdog is reset each time some data is received, so the timeout triggers after X time with no data received on a request.", + "type": "string", + "x-cli-flag": "ruler-storage.swift.request-timeout", + "x-format": "duration" + }, + "user_domain_id": { + "description": "OpenStack Swift user's domain ID.", + "type": "string", + "x-cli-flag": "ruler-storage.swift.user-domain-id" + }, + "user_domain_name": { + "description": "OpenStack Swift user's domain name.", + "type": "string", + "x-cli-flag": "ruler-storage.swift.user-domain-name" + }, + "user_id": { + "description": "OpenStack Swift user ID.", + "type": "string", + "x-cli-flag": "ruler-storage.swift.user-id" + }, + "username": { + "description": "OpenStack Swift username.", + "type": "string", + "x-cli-flag": "ruler-storage.swift.username" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "runtime_configuration_storage_config": { + "description": "The runtime_configuration_storage_config configures the storage backend for the runtime configuration file.", + "properties": { + "azure": { + "properties": { + "account_key": { + "description": "Azure storage account key", + "type": "string", + "x-cli-flag": "runtime-config.azure.account-key" + }, + "account_name": { + "description": "Azure storage account name", + "type": "string", + "x-cli-flag": "runtime-config.azure.account-name" + }, + "connection_string": { + "description": "The values of `account-name` and `endpoint-suffix` will not be ignored if `connection-string` is set. Use this method over `account-key` if you need to authenticate via a SAS token or if you use the Azurite emulator.", + "type": "string", + "x-cli-flag": "runtime-config.azure.connection-string" + }, + "container_name": { + "description": "Azure storage container name", + "type": "string", + "x-cli-flag": "runtime-config.azure.container-name" + }, + "endpoint_suffix": { + "description": "Azure storage endpoint suffix without scheme. The account name will be prefixed to this value to create the FQDN.", + "type": "string", + "x-cli-flag": "runtime-config.azure.endpoint-suffix" + }, + "http": { + "properties": { + "expect_continue_timeout": { + "default": "1s", + "description": "The time to wait for a server's first response headers after fully writing the request headers if the request has an Expect header. 0 to send the request body immediately.", + "type": "string", + "x-cli-flag": "runtime-config.azure.expect-continue-timeout", + "x-format": "duration" + }, + "idle_conn_timeout": { + "default": "1m30s", + "description": "The time an idle connection will remain idle before closing.", + "type": "string", + "x-cli-flag": "runtime-config.azure.http.idle-conn-timeout", + "x-format": "duration" + }, + "insecure_skip_verify": { + "default": false, + "description": "If the client connects via HTTPS and this option is enabled, the client will accept any certificate and hostname.", + "type": "boolean", + "x-cli-flag": "runtime-config.azure.http.insecure-skip-verify" + }, + "max_connections_per_host": { + "default": 0, + "description": "Maximum number of connections per host. 0 means no limit.", + "type": "number", + "x-cli-flag": "runtime-config.azure.max-connections-per-host" + }, + "max_idle_connections": { + "default": 100, + "description": "Maximum number of idle (keep-alive) connections across all hosts.
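To make the ruler_storage definition above concrete, a hedged sketch of the corresponding YAML block using the default s3 backend (the bucket, endpoint and region values are illustrative assumptions, not defaults):

    ruler_storage:
      backend: s3                              # default
      s3:
        bucket_name: cortex-rules              # illustrative
        endpoint: s3.us-east-1.amazonaws.com   # illustrative
        region: us-east-1                      # illustrative
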
0 means no limit.", + "type": "number", + "x-cli-flag": "runtime-config.azure.max-idle-connections" + }, + "max_idle_connections_per_host": { + "default": 100, + "description": "Maximum number of idle (keep-alive) connections to keep per-host. If 0, a built-in default value is used.", + "type": "number", + "x-cli-flag": "runtime-config.azure.max-idle-connections-per-host" + }, + "response_header_timeout": { + "default": "2m0s", + "description": "The amount of time the client will wait for a server's response headers.", + "type": "string", + "x-cli-flag": "runtime-config.azure.http.response-header-timeout", + "x-format": "duration" + }, + "tls_handshake_timeout": { + "default": "10s", + "description": "Maximum time to wait for a TLS handshake. 0 means no limit.", + "type": "string", + "x-cli-flag": "runtime-config.azure.tls-handshake-timeout", + "x-format": "duration" + } + }, + "type": "object" + }, + "max_retries": { + "default": 20, + "description": "Number of retries for recoverable errors.", + "type": "number", + "x-cli-flag": "runtime-config.azure.max-retries" + }, + "msi_resource": { + "description": "Deprecated: Azure storage MSI resource. It will be set automatically by Azure SDK.", + "type": "string", + "x-cli-flag": "runtime-config.azure.msi-resource" + }, + "user_assigned_id": { + "description": "Azure storage MSI resource managed identity client ID. If not supplied, the default Azure credential will be used. Set it to empty if you need to authenticate via Azure Workload Identity.", + "type": "string", + "x-cli-flag": "runtime-config.azure.user-assigned-id" + } + }, + "type": "object" + }, + "backend": { + "default": "filesystem", + "description": "Backend storage to use. Supported backends are: s3, gcs, azure, swift, filesystem.", + "type": "string", + "x-cli-flag": "runtime-config.backend" + }, + "file": { + "description": "File with the configuration that can be updated at runtime.", + "type": "string", + "x-cli-flag": "runtime-config.file" + }, + "filesystem": { + "properties": { + "dir": { + "description": "Local filesystem storage directory.", + "type": "string", + "x-cli-flag": "runtime-config.filesystem.dir" + } + }, + "type": "object" + }, + "gcs": { + "properties": { + "bucket_name": { + "description": "GCS bucket name", + "type": "string", + "x-cli-flag": "runtime-config.gcs.bucket-name" + }, + "service_account": { + "description": "JSON representing either a Google Developers Console client_credentials.json file or a Google Developers service account key file. If empty, fallback to Google default logic.", + "type": "string", + "x-cli-flag": "runtime-config.gcs.service-account" + } + }, + "type": "object" + }, + "period": { + "default": "10s", + "description": "How often to check the runtime config file.", + "type": "string", + "x-cli-flag": "runtime-config.reload-period", + "x-format": "duration" + }, + "s3": { + "properties": { + "access_key_id": { + "description": "S3 access key ID", + "type": "string", + "x-cli-flag": "runtime-config.s3.access-key-id" + }, + "bucket_lookup_type": { + "default": "auto", + "description": "The S3 bucket lookup style.
Supported values are: auto, virtual-hosted, path.", + "type": "string", + "x-cli-flag": "runtime-config.s3.bucket-lookup-type" + }, + "bucket_name": { + "description": "S3 bucket name", + "type": "string", + "x-cli-flag": "runtime-config.s3.bucket-name" + }, + "disable_dualstack": { + "default": false, + "description": "If enabled, S3 endpoint will use the non-dualstack variant.", + "type": "boolean", + "x-cli-flag": "runtime-config.s3.disable-dualstack" + }, + "endpoint": { + "description": "The S3 bucket endpoint. It could be an AWS S3 endpoint listed at https://docs.aws.amazon.com/general/latest/gr/s3.html or the address of an S3-compatible service in hostname:port format.", + "type": "string", + "x-cli-flag": "runtime-config.s3.endpoint" + }, + "http": { + "properties": { + "expect_continue_timeout": { + "default": "1s", + "description": "The time to wait for a server's first response headers after fully writing the request headers if the request has an Expect header. 0 to send the request body immediately.", + "type": "string", + "x-cli-flag": "runtime-config.s3.expect-continue-timeout", + "x-format": "duration" + }, + "idle_conn_timeout": { + "default": "1m30s", + "description": "The time an idle connection will remain idle before closing.", + "type": "string", + "x-cli-flag": "runtime-config.s3.http.idle-conn-timeout", + "x-format": "duration" + }, + "insecure_skip_verify": { + "default": false, + "description": "If the client connects via HTTPS and this option is enabled, the client will accept any certificate and hostname.", + "type": "boolean", + "x-cli-flag": "runtime-config.s3.http.insecure-skip-verify" + }, + "max_connections_per_host": { + "default": 0, + "description": "Maximum number of connections per host. 0 means no limit.", + "type": "number", + "x-cli-flag": "runtime-config.s3.max-connections-per-host" + }, + "max_idle_connections": { + "default": 100, + "description": "Maximum number of idle (keep-alive) connections across all hosts. 0 means no limit.", + "type": "number", + "x-cli-flag": "runtime-config.s3.max-idle-connections" + }, + "max_idle_connections_per_host": { + "default": 100, + "description": "Maximum number of idle (keep-alive) connections to keep per-host. If 0, a built-in default value is used.", + "type": "number", + "x-cli-flag": "runtime-config.s3.max-idle-connections-per-host" + }, + "response_header_timeout": { + "default": "2m0s", + "description": "The amount of time the client will wait for a server's response headers.", + "type": "string", + "x-cli-flag": "runtime-config.s3.http.response-header-timeout", + "x-format": "duration" + }, + "tls_handshake_timeout": { + "default": "10s", + "description": "Maximum time to wait for a TLS handshake. 0 means no limit.", + "type": "string", + "x-cli-flag": "runtime-config.s3.tls-handshake-timeout", + "x-format": "duration" + } + }, + "type": "object" + }, + "insecure": { + "default": false, + "description": "If enabled, use http:// for the S3 endpoint instead of https://. This could be useful in local dev/test environments while using an S3-compatible backend storage, like Minio.", + "type": "boolean", + "x-cli-flag": "runtime-config.s3.insecure" + }, + "list_objects_version": { + "description": "The list API version. Supported values are: v1, v2, and ''.", + "type": "string", + "x-cli-flag": "runtime-config.s3.list-objects-version" + }, + "region": { + "description": "S3 region.
If unset, the client will issue an S3 GetBucketLocation API call to autodetect it.", + "type": "string", + "x-cli-flag": "runtime-config.s3.region" + }, + "secret_access_key": { + "description": "S3 secret access key", + "type": "string", + "x-cli-flag": "runtime-config.s3.secret-access-key" + }, + "send_content_md5": { + "default": true, + "description": "If true, attach an MD5 checksum when uploading objects, and S3 uses the MD5 checksum algorithm to verify the provided digest. If false, use the CRC32C algorithm instead.", + "type": "boolean", + "x-cli-flag": "runtime-config.s3.send-content-md5" + }, + "signature_version": { + "default": "v4", + "description": "The signature version to use for authenticating against S3. Supported values are: v4, v2.", + "type": "string", + "x-cli-flag": "runtime-config.s3.signature-version" + }, + "sse": { + "$ref": "#/definitions/s3_sse_config" + } + }, + "type": "object" + }, + "swift": { + "properties": { + "application_credential_id": { + "description": "OpenStack Swift application credential ID.", + "type": "string", + "x-cli-flag": "runtime-config.swift.application-credential-id" + }, + "application_credential_name": { + "description": "OpenStack Swift application credential name.", + "type": "string", + "x-cli-flag": "runtime-config.swift.application-credential-name" + }, + "application_credential_secret": { + "description": "OpenStack Swift application credential secret.", + "type": "string", + "x-cli-flag": "runtime-config.swift.application-credential-secret" + }, + "auth_url": { + "description": "OpenStack Swift authentication URL.", + "type": "string", + "x-cli-flag": "runtime-config.swift.auth-url" + }, + "auth_version": { + "default": 0, + "description": "OpenStack Swift authentication API version. 0 to autodetect.", + "type": "number", + "x-cli-flag": "runtime-config.swift.auth-version" + }, + "connect_timeout": { + "default": "10s", + "description": "Time after which a connection attempt is aborted.", + "type": "string", + "x-cli-flag": "runtime-config.swift.connect-timeout", + "x-format": "duration" + }, + "container_name": { + "description": "Name of the OpenStack Swift container to put chunks in.", + "type": "string", + "x-cli-flag": "runtime-config.swift.container-name" + }, + "domain_id": { + "description": "OpenStack Swift user's domain ID.", + "type": "string", + "x-cli-flag": "runtime-config.swift.domain-id" + }, + "domain_name": { + "description": "OpenStack Swift user's domain name.", + "type": "string", + "x-cli-flag": "runtime-config.swift.domain-name" + }, + "max_retries": { + "default": 3, + "description": "Max retries on request errors.", + "type": "number", + "x-cli-flag": "runtime-config.swift.max-retries" + }, + "password": { + "description": "OpenStack Swift API key.", + "type": "string", + "x-cli-flag": "runtime-config.swift.password" + }, + "project_domain_id": { + "description": "ID of the OpenStack Swift project's domain (v3 auth only), only needed if it differs from the user domain.", + "type": "string", + "x-cli-flag": "runtime-config.swift.project-domain-id" + }, + "project_domain_name": { + "description": "Name of the OpenStack Swift project's domain (v3 auth only), only needed if it differs from the user domain.", + "type": "string", + "x-cli-flag": "runtime-config.swift.project-domain-name" + }, + "project_id": { + "description": "OpenStack Swift project ID (v2,v3 auth only).", + "type": "string", + "x-cli-flag": "runtime-config.swift.project-id" + }, + "project_name": { + "description": "OpenStack Swift project name (v2,v3
auth only).", + "type": "string", + "x-cli-flag": "runtime-config.swift.project-name" + }, + "region_name": { + "description": "OpenStack Swift Region to use (v2,v3 auth only).", + "type": "string", + "x-cli-flag": "runtime-config.swift.region-name" + }, + "request_timeout": { + "default": "5s", + "description": "Time after which an idle request is aborted. The timeout watchdog is reset each time some data is received, so the timeout triggers after X time with no data received on a request.", + "type": "string", + "x-cli-flag": "runtime-config.swift.request-timeout", + "x-format": "duration" + }, + "user_domain_id": { + "description": "OpenStack Swift user's domain ID.", + "type": "string", + "x-cli-flag": "runtime-config.swift.user-domain-id" + }, + "user_domain_name": { + "description": "OpenStack Swift user's domain name.", + "type": "string", + "x-cli-flag": "runtime-config.swift.user-domain-name" + }, + "user_id": { + "description": "OpenStack Swift user ID.", + "type": "string", + "x-cli-flag": "runtime-config.swift.user-id" + }, + "username": { + "description": "OpenStack Swift username.", + "type": "string", + "x-cli-flag": "runtime-config.swift.username" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "s3_sse_config": { + "description": "The s3_sse_config configures the S3 server-side encryption.", + "properties": { + "kms_encryption_context": { + "description": "KMS Encryption Context used for object encryption. It expects a JSON-formatted string.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.s3.sse.kms-encryption-context" + }, + "kms_key_id": { + "description": "KMS Key ID used to encrypt objects in S3.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.s3.sse.kms-key-id" + }, + "type": { + "description": "Enable AWS Server Side Encryption. Supported values: SSE-KMS, SSE-S3.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.s3.sse.type" + } + }, + "type": "object" + }, + "server_config": { + "description": "The server_config configures the HTTP and gRPC server of the launched service(s).", + "properties": { + "enable_channelz": { + "default": false, + "description": "Enable Channelz for gRPC server.
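As a rough illustration of the runtime configuration storage described above (the file path is an illustrative assumption; backend and period are the documented defaults):

    runtime_config:
      backend: filesystem              # default
      file: /etc/cortex/runtime.yaml   # illustrative path
      period: 10s                      # default reload interval (-runtime-config.reload-period)
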
A web UI will also be exposed on the HTTP server at /channelz.", + "type": "boolean", + "x-cli-flag": "server.enable-channelz" + }, + "graceful_shutdown_timeout": { + "default": "30s", + "description": "Timeout for graceful shutdowns.", + "type": "string", + "x-cli-flag": "server.graceful-shutdown-timeout", + "x-format": "duration" + }, + "grpc_listen_address": { + "description": "gRPC server listen address.", + "type": "string", + "x-cli-flag": "server.grpc-listen-address" + }, + "grpc_listen_conn_limit": { + "default": 0, + "description": "Maximum number of simultaneous gRPC connections, \u003c=0 to disable.", + "type": "number", + "x-cli-flag": "server.grpc-conn-limit" + }, + "grpc_listen_network": { + "default": "tcp", + "description": "gRPC server listen network.", + "type": "string", + "x-cli-flag": "server.grpc-listen-network" + }, + "grpc_listen_port": { + "default": 9095, + "description": "gRPC server listen port.", + "type": "number", + "x-cli-flag": "server.grpc-listen-port" + }, + "grpc_server_keepalive_time": { + "default": "2h0m0s", + "description": "Duration after which a keepalive probe is sent in case of no activity over the connection. Default: 2h.", + "type": "string", + "x-cli-flag": "server.grpc.keepalive.time", + "x-format": "duration" + }, + "grpc_server_keepalive_timeout": { + "default": "20s", + "description": "After having pinged for a keepalive check, the duration after which an idle connection should be closed. Default: 20s.", + "type": "string", + "x-cli-flag": "server.grpc.keepalive.timeout", + "x-format": "duration" + }, + "grpc_server_max_concurrent_streams": { + "default": 100, + "description": "Limit on the number of concurrent streams for gRPC calls (0 = unlimited).", + "type": "number", + "x-cli-flag": "server.grpc-max-concurrent-streams" + }, + "grpc_server_max_connection_age": { + "default": "2562047h47m16.854775807s", + "description": "The maximum amount of time a connection may exist before it will be closed. Default: infinity.", + "type": "string", + "x-cli-flag": "server.grpc.keepalive.max-connection-age", + "x-format": "duration" + }, + "grpc_server_max_connection_age_grace": { + "default": "2562047h47m16.854775807s", + "description": "An additive period after max-connection-age after which the connection will be forcibly closed. Default: infinity.", + "type": "string", + "x-cli-flag": "server.grpc.keepalive.max-connection-age-grace", + "x-format": "duration" + }, + "grpc_server_max_connection_idle": { + "default": "2562047h47m16.854775807s", + "description": "The duration after which an idle connection should be closed. Default: infinity.", + "type": "string", + "x-cli-flag": "server.grpc.keepalive.max-connection-idle", + "x-format": "duration" + }, + "grpc_server_max_recv_msg_size": { + "default": 4194304, + "description": "Limit on the size of a gRPC message this server can receive (bytes).", + "type": "number", + "x-cli-flag": "server.grpc-max-recv-msg-size-bytes" + }, + "grpc_server_max_send_msg_size": { + "default": 4194304, + "description": "Limit on the size of a gRPC message this server can send (bytes).", + "type": "number", + "x-cli-flag": "server.grpc-max-send-msg-size-bytes" + }, + "grpc_server_min_time_between_pings": { + "default": "10s", + "description": "Minimum amount of time a client should wait before sending a keepalive ping.
If a client sends keepalive pings more often, the server will send GOAWAY and close the connection.", + "type": "string", + "x-cli-flag": "server.grpc.keepalive.min-time-between-pings", + "x-format": "duration" + }, + "grpc_server_num_stream_workers": { + "default": 0, + "description": "Number of worker goroutines that should be used to process incoming streams. Setting this to 0 (default) will disable workers and spawn a new goroutine for each stream.", + "type": "number", + "x-cli-flag": "server.grpc_server-num-stream-workers" + }, + "grpc_server_ping_without_stream_allowed": { + "default": true, + "description": "If true, the server allows keepalive pings even when there are no active streams (RPCs). If false, and a client sends a ping when there are no active streams, the server will send GOAWAY and close the connection.", + "type": "boolean", + "x-cli-flag": "server.grpc.keepalive.ping-without-stream-allowed" + }, + "grpc_tls_config": { + "properties": { + "cert_file": { + "description": "gRPC TLS server cert path.", + "type": "string", + "x-cli-flag": "server.grpc-tls-cert-path" + }, + "client_auth_type": { + "description": "gRPC TLS Client Auth type.", + "type": "string", + "x-cli-flag": "server.grpc-tls-client-auth" + }, + "client_ca_file": { + "description": "gRPC TLS Client CA path.", + "type": "string", + "x-cli-flag": "server.grpc-tls-ca-path" + }, + "key_file": { + "description": "gRPC TLS server key path.", + "type": "string", + "x-cli-flag": "server.grpc-tls-key-path" + } + }, + "type": "object" + }, + "http_listen_address": { + "description": "HTTP server listen address.", + "type": "string", + "x-cli-flag": "server.http-listen-address" + }, + "http_listen_conn_limit": { + "default": 0, + "description": "Maximum number of simultaneous HTTP connections, \u003c=0 to disable.", + "type": "number", + "x-cli-flag": "server.http-conn-limit" + }, + "http_listen_network": { + "default": "tcp", + "description": "HTTP server listen network, default tcp.", + "type": "string", + "x-cli-flag": "server.http-listen-network" + }, + "http_listen_port": { + "default": 80, + "description": "HTTP server listen port.", + "type": "number", + "x-cli-flag": "server.http-listen-port" + }, + "http_path_prefix": { + "description": "Base path to serve all API routes from (e.g.
/v1/).", + "type": "string", + "x-cli-flag": "server.path-prefix" + }, + "http_server_idle_timeout": { + "default": "2m0s", + "description": "Idle timeout for HTTP server.", + "type": "string", + "x-cli-flag": "server.http-idle-timeout", + "x-format": "duration" + }, + "http_server_read_timeout": { + "default": "30s", + "description": "Read timeout for HTTP server.", + "type": "string", + "x-cli-flag": "server.http-read-timeout", + "x-format": "duration" + }, + "http_server_write_timeout": { + "default": "30s", + "description": "Write timeout for HTTP server.", + "type": "string", + "x-cli-flag": "server.http-write-timeout", + "x-format": "duration" + }, + "http_tls_config": { + "properties": { + "cert_file": { + "description": "HTTP server cert path.", + "type": "string", + "x-cli-flag": "server.http-tls-cert-path" + }, + "client_auth_type": { + "description": "HTTP TLS Client Auth type.", + "type": "string", + "x-cli-flag": "server.http-tls-client-auth" + }, + "client_ca_file": { + "description": "HTTP TLS Client CA path.", + "type": "string", + "x-cli-flag": "server.http-tls-ca-path" + }, + "key_file": { + "description": "HTTP server key path.", + "type": "string", + "x-cli-flag": "server.http-tls-key-path" + } + }, + "type": "object" + }, + "log_format": { + "default": "logfmt", + "description": "Output log messages in the given format. Valid formats: [logfmt, json]", + "type": "string", + "x-cli-flag": "log.format" + }, + "log_level": { + "default": "info", + "description": "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error]", + "type": "string", + "x-cli-flag": "log.level" + }, + "log_request_at_info_level_enabled": { + "default": false, + "description": "Optionally log requests at info level instead of debug level. Applies to request headers as well if server.log-request-headers is enabled.", + "type": "boolean", + "x-cli-flag": "server.log-request-at-info-level-enabled" + }, + "log_request_exclude_headers_list": { + "description": "Comma-separated list of headers to exclude from logging. Only used if server.log-request-headers is true.", + "type": "string", + "x-cli-flag": "server.log-request-headers-exclude-list" + }, + "log_request_headers": { + "default": false, + "description": "Optionally log request headers.", + "type": "boolean", + "x-cli-flag": "server.log-request-headers" + }, + "log_source_ips_enabled": { + "default": false, + "description": "Optionally log the source IPs.", + "type": "boolean", + "x-cli-flag": "server.log-source-ips-enabled" + }, + "log_source_ips_header": { + "description": "Header field storing the source IPs. Only used if server.log-source-ips-enabled is true. If not set, the default Forwarded, X-Real-IP and X-Forwarded-For headers are used.", + "type": "string", + "x-cli-flag": "server.log-source-ips-header" + }, + "log_source_ips_regex": { + "description": "Regex for matching the source IPs. Only used if server.log-source-ips-enabled is true. If not set, the default Forwarded, X-Real-IP and X-Forwarded-For headers are used.", + "type": "string", + "x-cli-flag": "server.log-source-ips-regex" + }, + "register_instrumentation": { + "default": true, + "description": "Register the instrumentation handlers (/metrics etc).", + "type": "boolean", + "x-cli-flag": "server.register-instrumentation" + }, + "tls_cipher_suites": { + "description": "Comma-separated list of cipher suites to use.
If blank, the default Go cipher suites are used.", + "type": "string", + "x-cli-flag": "server.tls-cipher-suites" + }, + "tls_min_version": { + "description": "Minimum TLS version to use. Allowed values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13. If blank, the Go TLS minimum version is used.", + "type": "string", + "x-cli-flag": "server.tls-min-version" + } + }, + "type": "object" + }, + "storage_config": { + "description": "The storage_config configures the storage type Cortex uses.", + "properties": { + "engine": { + "default": "blocks", + "description": "The storage engine to use: blocks is the only supported option today.", + "type": "string", + "x-cli-flag": "store.engine" + } + }, + "type": "object" + }, + "store_gateway_config": { + "description": "The store_gateway_config configures the store-gateway service used by the blocks storage.", + "properties": { + "disabled_tenants": { + "description": "Comma-separated list of tenants whose store metrics this store-gateway cannot process. If specified, a store-gateway that would normally pick the specified tenant(s) for processing will ignore them instead.", + "type": "string", + "x-cli-flag": "store-gateway.disabled-tenants" + }, + "enabled_tenants": { + "description": "Comma-separated list of tenants whose store metrics this store-gateway can process. If specified, only these tenants will be handled by the store-gateway, otherwise this store-gateway will be enabled for all the tenants in the store-gateway cluster.", + "type": "string", + "x-cli-flag": "store-gateway.enabled-tenants" + }, + "hedged_request": { + "properties": { + "enabled": { + "default": false, + "description": "If true, hedged requests are applied to object store calls. It can help with reducing tail latency.", + "type": "boolean", + "x-cli-flag": "store-gateway.hedged-request.enabled" + }, + "max_requests": { + "default": 3, + "description": "Maximum number of hedged requests allowed for each initial request. A high number can reduce latency but increase internal calls.", + "type": "number", + "x-cli-flag": "store-gateway.hedged-request.max-requests" + }, + "quantile": { + "default": 0.9, + "description": "It is used to calculate a latency threshold to trigger hedged requests. For example, additional requests are triggered when the initial request response time exceeds the 90th percentile.", + "type": "number", + "x-cli-flag": "store-gateway.hedged-request.quantile" + } + }, + "type": "object" + }, + "query_protection": { + "properties": { + "rejection": { + "properties": { + "threshold": { + "properties": { + "cpu_utilization": { + "default": 0, + "description": "EXPERIMENTAL: Max CPU utilization that this store-gateway can reach before rejecting new query requests (across all tenants), expressed as a fraction between 0 and 1. monitored_resources config must include the resource type. 0 to disable.", + "type": "number", + "x-cli-flag": "store-gateway.query-protection.rejection.threshold.cpu-utilization" + }, + "heap_utilization": { + "default": 0, + "description": "EXPERIMENTAL: Max heap utilization that this store-gateway can reach before rejecting new query requests (across all tenants), expressed as a fraction between 0 and 1. monitored_resources config must include the resource type.
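For reference, a minimal server block assembled from the defaults documented above (every value shown is the schema default, so this sketch only makes the implicit configuration explicit):

    server:
      http_listen_port: 80             # default
      grpc_listen_port: 9095           # default
      log_level: info                  # default
      log_format: logfmt               # default
      graceful_shutdown_timeout: 30s   # default
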
0 to disable.", + "type": "number", + "x-cli-flag": "store-gateway.query-protection.rejection.threshold.heap-utilization" + } + }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "sharding_enabled": { + "default": false, + "description": "Shard blocks across multiple store gateway instances. This option needs to be set on both the store-gateway and querier when running in microservices mode.", + "type": "boolean", + "x-cli-flag": "store-gateway.sharding-enabled" + }, + "sharding_ring": { + "description": "The hash ring configuration. This option is required only if blocks sharding is enabled.", + "properties": { + "detailed_metrics_enabled": { + "default": true, + "description": "Set to true to enable ring detailed metrics. These metrics provide detailed information, such as token count and ownership per tenant. Disabling them can significantly decrease the number of metrics emitted.", + "type": "boolean", + "x-cli-flag": "store-gateway.sharding-ring.detailed-metrics-enabled" + }, + "final_sleep": { + "default": "0s", + "description": "The time to sleep when the store-gateway is shutting down. Needs to be close to or larger than the KV store information propagation delay.", + "type": "string", + "x-cli-flag": "store-gateway.sharding-ring.final-sleep", + "x-format": "duration" + }, + "heartbeat_period": { + "default": "15s", + "description": "Period at which to heartbeat to the ring. 0 = disabled.", + "type": "string", + "x-cli-flag": "store-gateway.sharding-ring.heartbeat-period", + "x-format": "duration" + }, + "heartbeat_timeout": { + "default": "1m0s", + "description": "The heartbeat timeout after which store gateways are considered unhealthy within the ring. 0 = never (timeout disabled). This option needs to be set on both the store-gateway and querier when running in microservices mode.", + "type": "string", + "x-cli-flag": "store-gateway.sharding-ring.heartbeat-timeout", + "x-format": "duration" + }, + "instance_availability_zone": { + "description": "The availability zone where this instance is running. Required if zone-awareness is enabled.", + "type": "string", + "x-cli-flag": "store-gateway.sharding-ring.instance-availability-zone" + }, + "instance_interface_names": { + "default": "[eth0 en0]", + "description": "Name of network interface to read address from.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "store-gateway.sharding-ring.instance-interface-names" + }, + "keep_instance_in_the_ring_on_shutdown": { + "default": false, + "description": "True to keep the store gateway instance in the ring when it shuts down. The instance will then be auto-forgotten from the ring after 10*heartbeat_timeout.", + "type": "boolean", + "x-cli-flag": "store-gateway.sharding-ring.keep-instance-in-the-ring-on-shutdown" + }, + "kvstore": { + "description": "The key-value store used to share the hash ring across multiple instances.
This option needs to be set on both the store-gateway and querier when running in microservices mode.", + "properties": { + "consul": { + "$ref": "#/definitions/consul_config" + }, + "dynamodb": { + "properties": { + "max_cas_retries": { + "default": 10, + "description": "Maximum number of retries for DDB KV CAS.", + "type": "number", + "x-cli-flag": "store-gateway.sharding-ring.dynamodb.max-cas-retries" + }, + "puller_sync_time": { + "default": "1m0s", + "description": "Time to refresh local ring with information on dynamodb.", + "type": "string", + "x-cli-flag": "store-gateway.sharding-ring.dynamodb.puller-sync-time", + "x-format": "duration" + }, + "region": { + "description": "Region to access dynamodb.", + "type": "string", + "x-cli-flag": "store-gateway.sharding-ring.dynamodb.region" + }, + "table_name": { + "description": "Table name to use on dynamodb.", + "type": "string", + "x-cli-flag": "store-gateway.sharding-ring.dynamodb.table-name" + }, + "timeout": { + "default": "2m0s", + "description": "Timeout of dynamoDbClient requests. Default is 2m.", + "type": "string", + "x-cli-flag": "store-gateway.sharding-ring.dynamodb.timeout", + "x-format": "duration" + }, + "ttl": { + "default": "0s", + "description": "Time to expire items on dynamodb.", + "type": "string", + "x-cli-flag": "store-gateway.sharding-ring.dynamodb.ttl-time", + "x-format": "duration" + } + }, + "type": "object" + }, + "etcd": { + "$ref": "#/definitions/etcd_config" + }, + "multi": { + "properties": { + "mirror_enabled": { + "default": false, + "description": "Mirror writes to secondary store.", + "type": "boolean", + "x-cli-flag": "store-gateway.sharding-ring.multi.mirror-enabled" + }, + "mirror_timeout": { + "default": "2s", + "description": "Timeout for storing value to secondary store.", + "type": "string", + "x-cli-flag": "store-gateway.sharding-ring.multi.mirror-timeout", + "x-format": "duration" + }, + "primary": { + "description": "Primary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "store-gateway.sharding-ring.multi.primary" + }, + "secondary": { + "description": "Secondary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "store-gateway.sharding-ring.multi.secondary" + } + }, + "type": "object" + }, + "prefix": { + "default": "collectors/", + "description": "The prefix for the keys in the store. Should end with a /.", + "type": "string", + "x-cli-flag": "store-gateway.sharding-ring.prefix" + }, + "store": { + "default": "consul", + "description": "Backend storage to use for the ring. Supported values are: consul, dynamodb, etcd, inmemory, memberlist, multi.", + "type": "string", + "x-cli-flag": "store-gateway.sharding-ring.store" + } + }, + "type": "object" + }, + "replication_factor": { + "default": 3, + "description": "The replication factor to use when sharding blocks. This option needs to be set on both the store-gateway and querier when running in microservices mode.", + "type": "number", + "x-cli-flag": "store-gateway.sharding-ring.replication-factor" + }, + "tokens_file_path": { + "description": "File path where tokens are stored. 
If empty, tokens are neither stored at shutdown nor restored at startup.", + "type": "string", + "x-cli-flag": "store-gateway.sharding-ring.tokens-file-path" + }, + "wait_instance_state_timeout": { + "default": "10m0s", + "description": "Timeout for waiting on the store-gateway to reach the desired state in the ring.", + "type": "string", + "x-cli-flag": "store-gateway.sharding-ring.wait-instance-state-timeout", + "x-format": "duration" + }, + "wait_stability_max_duration": { + "default": "5m0s", + "description": "Maximum time to wait for ring stability at startup. If the store-gateway ring keeps changing after this period of time, the store-gateway will start anyway.", + "type": "string", + "x-cli-flag": "store-gateway.sharding-ring.wait-stability-max-duration", + "x-format": "duration" + }, + "wait_stability_min_duration": { + "default": "1m0s", + "description": "Minimum time to wait for ring stability at startup. 0 to disable.", + "type": "string", + "x-cli-flag": "store-gateway.sharding-ring.wait-stability-min-duration", + "x-format": "duration" + }, + "zone_awareness_enabled": { + "default": false, + "description": "True to enable zone-awareness and replicate blocks across different availability zones.", + "type": "boolean", + "x-cli-flag": "store-gateway.sharding-ring.zone-awareness-enabled" + } + }, + "type": "object" + }, + "sharding_strategy": { + "default": "default", + "description": "The sharding strategy to use. Supported values are: default, shuffle-sharding.", + "type": "string", + "x-cli-flag": "store-gateway.sharding-strategy" + } + }, + "type": "object" + }, + "tracing_config": { + "description": "The tracing_config configures the tracing backends Cortex uses.", + "properties": { + "otel": { + "properties": { + "exporter_type": { + "description": "Enhance/modify traces/propagators for a specific exporter. If empty, OTEL defaults will apply. Supported values are: `awsxray`.", + "type": "string", + "x-cli-flag": "tracing.otel.exporter-type" + }, + "otlp_endpoint": { + "description": "OTLP collector endpoint that the driver will use to send spans.", + "type": "string", + "x-cli-flag": "tracing.otel.otlp-endpoint" + }, + "round_robin": { + "default": false, + "description": "If enabled, use the round_robin gRPC load balancing policy. By default, the pick_first policy is used. For more details, please refer to https://github.com/grpc/grpc/blob/master/doc/load-balancing.md#load-balancing-policies.", + "type": "boolean", + "x-cli-flag": "tracing.otel.round-robin" + }, + "sample_ratio": { + "default": 0.001, + "description": "Fraction of traces to be sampled. Fractions \u003e= 1 mean sampling is off and everything is traced.", + "type": "number", + "x-cli-flag": "tracing.otel.sample-ratio" + }, + "tls": { + "properties": { + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "tracing.otel.tls.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "tracing.otel.tls.tls-cert-path" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "tracing.otel.tls.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. 
Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "tracing.otel.tls.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "tracing.otel.tls.tls-server-name" + } + }, + "type": "object" + }, + "tls_enabled": { + "default": false, + "description": "Enable TLS in the gRPC client. This flag needs to be enabled when any other TLS flag is set. If set to false, an insecure connection to the gRPC server will be used.", + "type": "boolean", + "x-cli-flag": "tracing.otel.tls-enabled" + } + }, + "type": "object" + }, + "type": { + "default": "jaeger", + "description": "Tracing type. OTEL and JAEGER are currently supported. For jaeger, the `JAEGER_AGENT_HOST` environment variable should also be set. See: https://cortexmetrics.io/docs/guides/tracing .", + "type": "string", + "x-cli-flag": "tracing.type" + } + }, + "type": "object" + } + }, + "description": "JSON Schema for Cortex configuration file", + "properties": { + "alertmanager": { + "$ref": "#/definitions/alertmanager_config" + }, + "alertmanager_storage": { + "$ref": "#/definitions/alertmanager_storage_config" + }, + "api": { + "properties": { + "alertmanager_http_prefix": { + "default": "/alertmanager", + "description": "HTTP URL path under which the Alertmanager UI and API will be served.", + "type": "string", + "x-cli-flag": "http.alertmanager-http-prefix" + }, + "build_info_enabled": { + "default": false, + "description": "If enabled, the build info API will be served by the query frontend or querier.", + "type": "boolean", + "x-cli-flag": "api.build-info-enabled" + }, + "cors_origin": { + "default": ".*", + "description": "Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1|domain2)\\.com'", + "type": "string", + "x-cli-flag": "server.cors-origin" + }, + "http_request_headers_to_log": { + "default": [], + "description": "Which HTTP request headers to add to logs.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "api.http-request-headers-to-log" + }, + "prometheus_http_prefix": { + "default": "/prometheus", + "description": "HTTP URL path under which the Prometheus API will be served.", + "type": "string", + "x-cli-flag": "http.prometheus-http-prefix" + }, + "querier_default_codec": { + "default": "json", + "description": "Choose the default codec for querier response serialization. Supports 'json' and 'protobuf'.", + "type": "string", + "x-cli-flag": "api.querier-default-codec" + }, + "request_id_header": { + "description": "HTTP header that can be used as a request ID.", + "type": "string", + "x-cli-flag": "api.request-id-header" + }, + "response_compression_enabled": { + "default": false, + "description": "Use GZIP compression for API responses. 
Some endpoints serve large YAML or JSON blobs, which can benefit from compression.", + "type": "boolean", + "x-cli-flag": "api.response-compression-enabled" + } + }, + "type": "object" + }, + "auth_enabled": { + "default": true, + "description": "Set to false to disable auth.", + "type": "boolean", + "x-cli-flag": "auth.enabled" + }, + "blocks_storage": { + "$ref": "#/definitions/blocks_storage_config" + }, + "compactor": { + "$ref": "#/definitions/compactor_config" + }, + "configs": { + "$ref": "#/definitions/configs_config" + }, + "distributor": { + "$ref": "#/definitions/distributor_config" + }, + "flusher": { + "$ref": "#/definitions/flusher_config" + }, + "frontend": { + "$ref": "#/definitions/query_frontend_config" + }, + "frontend_worker": { + "$ref": "#/definitions/frontend_worker_config" + }, + "http_prefix": { + "default": "/api/prom", + "description": "HTTP path prefix for the Cortex API.", + "type": "string", + "x-cli-flag": "http.prefix" + }, + "ingester": { + "$ref": "#/definitions/ingester_config" + }, + "ingester_client": { + "$ref": "#/definitions/ingester_client_config" + }, + "limits": { + "$ref": "#/definitions/limits_config" + }, + "memberlist": { + "$ref": "#/definitions/memberlist_config" + }, + "parquet_converter": { + "properties": { + "conversion_interval": { + "default": "1m0s", + "description": "How often to check for new TSDB blocks to convert to parquet format.", + "type": "string", + "x-cli-flag": "parquet-converter.conversion-interval", + "x-format": "duration" + }, + "data_dir": { + "default": "./data", + "description": "Local directory path for caching TSDB blocks during parquet conversion.", + "type": "string", + "x-cli-flag": "parquet-converter.data-dir" + }, + "file_buffer_enabled": { + "default": true, + "description": "Enable disk-based write buffering to reduce memory consumption during parquet file generation.", + "type": "boolean", + "x-cli-flag": "parquet-converter.file-buffer-enabled" + }, + "max_rows_per_row_group": { + "default": 1000000, + "description": "Maximum number of time series per parquet row group. Larger values improve compression but may reduce performance during reads.", + "type": "number", + "x-cli-flag": "parquet-converter.max-rows-per-row-group" + }, + "meta_sync_concurrency": { + "default": 20, + "description": "Maximum concurrent goroutines for downloading block metadata from object storage.", + "type": "number", + "x-cli-flag": "parquet-converter.meta-sync-concurrency" + }, + "ring": { + "properties": { + "auto_forget_delay": { + "default": "2m0s", + "description": "Time since the last heartbeat before the parquet-converter will be removed from the ring. 0 to disable.", + "type": "string", + "x-cli-flag": "parquet-converter.auto-forget-delay", + "x-format": "duration" + }, + "heartbeat_period": { + "default": "5s", + "description": "Period at which to heartbeat to the ring. 0 = disabled.", + "type": "string", + "x-cli-flag": "parquet-converter.ring.heartbeat-period", + "x-format": "duration" + }, + "heartbeat_timeout": { + "default": "1m0s", + "description": "The heartbeat timeout after which parquet-converters are considered unhealthy within the ring. 
0 = never (timeout disabled).", + "type": "string", + "x-cli-flag": "parquet-converter.ring.heartbeat-timeout", + "x-format": "duration" + }, + "kvstore": { + "properties": { + "consul": { + "$ref": "#/definitions/consul_config" + }, + "dynamodb": { + "properties": { + "max_cas_retries": { + "default": 10, + "description": "Maximum number of retries for DDB KV CAS.", + "type": "number", + "x-cli-flag": "parquet-converter.ring.dynamodb.max-cas-retries" + }, + "puller_sync_time": { + "default": "1m0s", + "description": "Time to refresh local ring with information on dynamodb.", + "type": "string", + "x-cli-flag": "parquet-converter.ring.dynamodb.puller-sync-time", + "x-format": "duration" + }, + "region": { + "description": "Region to access dynamodb.", + "type": "string", + "x-cli-flag": "parquet-converter.ring.dynamodb.region" + }, + "table_name": { + "description": "Table name to use on dynamodb.", + "type": "string", + "x-cli-flag": "parquet-converter.ring.dynamodb.table-name" + }, + "timeout": { + "default": "2m0s", + "description": "Timeout of dynamoDbClient requests. Default is 2m.", + "type": "string", + "x-cli-flag": "parquet-converter.ring.dynamodb.timeout", + "x-format": "duration" + }, + "ttl": { + "default": "0s", + "description": "Time to expire items on dynamodb.", + "type": "string", + "x-cli-flag": "parquet-converter.ring.dynamodb.ttl-time", + "x-format": "duration" + } + }, + "type": "object" + }, + "etcd": { + "$ref": "#/definitions/etcd_config" + }, + "multi": { + "properties": { + "mirror_enabled": { + "default": false, + "description": "Mirror writes to secondary store.", + "type": "boolean", + "x-cli-flag": "parquet-converter.ring.multi.mirror-enabled" + }, + "mirror_timeout": { + "default": "2s", + "description": "Timeout for storing value to secondary store.", + "type": "string", + "x-cli-flag": "parquet-converter.ring.multi.mirror-timeout", + "x-format": "duration" + }, + "primary": { + "description": "Primary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "parquet-converter.ring.multi.primary" + }, + "secondary": { + "description": "Secondary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "parquet-converter.ring.multi.secondary" + } + }, + "type": "object" + }, + "prefix": { + "default": "collectors/", + "description": "The prefix for the keys in the store. Should end with a /.", + "type": "string", + "x-cli-flag": "parquet-converter.ring.prefix" + }, + "store": { + "default": "consul", + "description": "Backend storage to use for the ring. Supported values are: consul, dynamodb, etcd, inmemory, memberlist, multi.", + "type": "string", + "x-cli-flag": "parquet-converter.ring.store" + } + }, + "type": "object" + }, + "tokens_file_path": { + "description": "File path where tokens are stored. 
If empty, tokens are neither stored at shutdown nor restored at startup.", + "type": "string", + "x-cli-flag": "parquet-converter.ring.tokens-file-path" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "querier": { + "$ref": "#/definitions/querier_config" + }, + "query_range": { + "$ref": "#/definitions/query_range_config" + }, + "query_scheduler": { + "properties": { + "grpc_client_config": { + "description": "This configures the gRPC client used to report errors back to the query-frontend.", + "properties": { + "backoff_config": { + "properties": { + "max_period": { + "default": "10s", + "description": "Maximum delay when backing off.", + "type": "string", + "x-cli-flag": "query-scheduler.grpc-client-config.backoff-max-period", + "x-format": "duration" + }, + "max_retries": { + "default": 10, + "description": "Number of times to back off and retry before failing.", + "type": "number", + "x-cli-flag": "query-scheduler.grpc-client-config.backoff-retries" + }, + "min_period": { + "default": "100ms", + "description": "Minimum delay when backing off.", + "type": "string", + "x-cli-flag": "query-scheduler.grpc-client-config.backoff-min-period", + "x-format": "duration" + } + }, + "type": "object" + }, + "backoff_on_ratelimits": { + "default": false, + "description": "Enable backoff and retry when rate limits are hit.", + "type": "boolean", + "x-cli-flag": "query-scheduler.grpc-client-config.backoff-on-ratelimits" + }, + "connect_timeout": { + "default": "5s", + "description": "The maximum amount of time to establish a connection. A value of 0 means using the default gRPC client connect timeout of 20s.", + "type": "string", + "x-cli-flag": "query-scheduler.grpc-client-config.connect-timeout", + "x-format": "duration" + }, + "grpc_compression": { + "description": "Use compression when sending messages. Supported values are: 'gzip', 'snappy', 'snappy-block', 'zstd' and '' (disable compression)", + "type": "string", + "x-cli-flag": "query-scheduler.grpc-client-config.grpc-compression" + }, + "max_recv_msg_size": { + "default": 104857600, + "description": "gRPC client max receive message size (bytes).", + "type": "number", + "x-cli-flag": "query-scheduler.grpc-client-config.grpc-max-recv-msg-size" + }, + "max_send_msg_size": { + "default": 16777216, + "description": "gRPC client max send message size (bytes).", + "type": "number", + "x-cli-flag": "query-scheduler.grpc-client-config.grpc-max-send-msg-size" + }, + "rate_limit": { + "default": 0, + "description": "Rate limit for gRPC client; 0 means disabled.", + "type": "number", + "x-cli-flag": "query-scheduler.grpc-client-config.grpc-client-rate-limit" + }, + "rate_limit_burst": { + "default": 0, + "description": "Rate limit burst for gRPC client.", + "type": "number", + "x-cli-flag": "query-scheduler.grpc-client-config.grpc-client-rate-limit-burst" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "query-scheduler.grpc-client-config.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "query-scheduler.grpc-client-config.tls-cert-path" + }, + "tls_enabled": { + "default": false, + "description": "Enable TLS in the gRPC client. This flag needs to be enabled when any other TLS flag is set. 
If set to false, an insecure connection to the gRPC server will be used.", + "type": "boolean", + "x-cli-flag": "query-scheduler.grpc-client-config.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "query-scheduler.grpc-client-config.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "query-scheduler.grpc-client-config.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "query-scheduler.grpc-client-config.tls-server-name" + } + }, + "type": "object" + }, + "querier_forget_delay": { + "default": "0s", + "description": "If a querier disconnects without sending a notification about graceful shutdown, the query-scheduler will keep the querier in the tenant's shard until the forget delay has passed. This feature is useful to reduce the blast radius when shuffle-sharding is enabled.", + "type": "string", + "x-cli-flag": "query-scheduler.querier-forget-delay", + "x-format": "duration" + } + }, + "type": "object" + }, + "resource_monitor": { + "properties": { + "cpu_rate_interval": { + "default": "1m0s", + "description": "Interval to calculate average CPU rate. Must be greater than the resource monitor interval.", + "type": "string", + "x-cli-flag": "resource-monitor.cpu-rate-interval", + "x-format": "duration" + }, + "interval": { + "default": "100ms", + "description": "Update interval of the resource monitor. Must be greater than 0.", + "type": "string", + "x-cli-flag": "resource-monitor.interval", + "x-format": "duration" + }, + "resources": { + "description": "Comma-separated list of resources to monitor. Supported values are cpu and heap, which track metrics from github.com/prometheus/procfs and runtime/metrics that are close estimates. Empty string to disable.", + "type": "string", + "x-cli-flag": "resource-monitor.resources" + } + }, + "type": "object" + }, + "ruler": { + "$ref": "#/definitions/ruler_config" + }, + "ruler_storage": { + "$ref": "#/definitions/ruler_storage_config" + }, + "runtime_config": { + "$ref": "#/definitions/runtime_configuration_storage_config" + }, + "server": { + "$ref": "#/definitions/server_config" + }, + "storage": { + "$ref": "#/definitions/storage_config" + }, + "store_gateway": { + "$ref": "#/definitions/store_gateway_config" + }, + "target": { + "default": "all", + "description": "Comma-separated list of Cortex modules to load. The alias 'all' can be used in the list to load a number of core modules and will enable single-binary mode. Use the '-modules' command line flag to get a list of available modules, and to see which modules are included in 'all'.", + "type": "string", + "x-cli-flag": "target" + }, + "tenant_federation": { + "properties": { + "enabled": { + "default": false, + "description": "If enabled on all Cortex services, queries can be federated across multiple tenants. 
The tenant IDs involved need to be specified separated by a `|` character in the `X-Scope-OrgID` header (experimental).", + "type": "boolean", + "x-cli-flag": "tenant-federation.enabled" + }, + "max_concurrent": { + "default": 16, + "description": "The number of workers used to process each federated query.", + "type": "number", + "x-cli-flag": "tenant-federation.max-concurrent" + }, + "max_tenant": { + "default": 0, + "description": "A maximum number of tenants to query at once. 0 means no limit.", + "type": "number", + "x-cli-flag": "tenant-federation.max-tenant" + }, + "regex_matcher_enabled": { + "default": false, + "description": "[Experimental] If enabled, the `X-Scope-OrgID` header value can accept a regex and the matched tenantIDs are automatically involved. The regex matching rule follows the Prometheus, see the detail: https://prometheus.io/docs/prometheus/latest/querying/basics/#regular-expressions. The user discovery is based on scanning block storage, so new users can get queries after uploading a block (generally 2h).", + "type": "boolean", + "x-cli-flag": "tenant-federation.regex-matcher-enabled" + }, + "user_sync_interval": { + "default": "5m0s", + "description": "[Experimental] If the regex matcher is enabled, it specifies how frequently to scan users. The scanned users are used to calculate matched tenantIDs. The scanning strategy depends on the `-blocks-storage.users-scanner.strategy`.", + "type": "string", + "x-cli-flag": "tenant-federation.user-sync-interval", + "x-format": "duration" + } + }, + "type": "object" + }, + "tracing": { + "$ref": "#/definitions/tracing_config" + } + }, + "title": "Cortex Configuration Schema", + "type": "object" +} diff --git a/tools/doc-generator/json_schema_writer.go b/tools/doc-generator/json_schema_writer.go new file mode 100644 index 0000000000..475e9f4e4d --- /dev/null +++ b/tools/doc-generator/json_schema_writer.go @@ -0,0 +1,238 @@ +package main + +import ( + "encoding/json" + "fmt" + "io" + "strings" +) + +type JSONSchemaWriter struct { + out io.Writer +} + +func NewJSONSchemaWriter(out io.Writer) *JSONSchemaWriter { + return &JSONSchemaWriter{out: out} +} + +func (w *JSONSchemaWriter) WriteSchema(blocks []*configBlock) error { + schema := w.generateJSONSchema(blocks) + + encoder := json.NewEncoder(w.out) + encoder.SetIndent("", " ") + return encoder.Encode(schema) +} + +func (w *JSONSchemaWriter) generateJSONSchema(blocks []*configBlock) map[string]any { + schema := map[string]any{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://raw.githubusercontent.com/cortexproject/cortex/master/schemas/cortex-config-schema.json", + "title": "Cortex Configuration Schema", + "description": "JSON Schema for Cortex configuration file", + "type": "object", + "properties": map[string]any{}, + "definitions": map[string]any{}, + } + + properties := schema["properties"].(map[string]any) + definitions := schema["definitions"].(map[string]any) + + // Process each config block + for _, block := range blocks { + if block.name == "" { + // This is the root block, process its entries as top-level properties + w.processBlockEntries(block, properties, definitions) + } else { + // This is a named block, add it to definitions + definitions[block.name] = w.generateBlockSchema(block) + } + } + + return schema +} + +func (w *JSONSchemaWriter) processBlockEntries(block *configBlock, properties map[string]any, definitions map[string]any) { + for _, entry := range block.entries { + switch entry.kind { + case "field": + 
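+ // Leaf config fields map to JSON Schema property objects: type, description,
+ // and default, plus the x-cli-flag and format hints added by generateFieldSchema.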
properties[entry.name] = w.generateFieldSchema(entry) + case "block": + if entry.root { + // Root blocks are referenced via $ref + properties[entry.name] = map[string]any{ + "$ref": fmt.Sprintf("#/definitions/%s", entry.block.name), + } + // Add the block to definitions if not already there + if _, exists := definitions[entry.block.name]; !exists { + definitions[entry.block.name] = w.generateBlockSchema(entry.block) + } + } else { + // Inline blocks are embedded directly + properties[entry.name] = w.generateBlockSchema(entry.block) + } + } + } +} + +func (w *JSONSchemaWriter) generateBlockSchema(block *configBlock) map[string]any { + obj := map[string]any{ + "type": "object", + "properties": map[string]any{}, + } + + if block.desc != "" { + obj["description"] = block.desc + } + + properties := obj["properties"].(map[string]any) + + for _, entry := range block.entries { + switch entry.kind { + case "field": + properties[entry.name] = w.generateFieldSchema(entry) + case "block": + if entry.root { + // Reference to another root block + properties[entry.name] = map[string]any{ + "$ref": fmt.Sprintf("#/definitions/%s", entry.block.name), + } + } else { + // Inline nested block + properties[entry.name] = w.generateBlockSchema(entry.block) + } + } + } + + return obj +} + +func (w *JSONSchemaWriter) generateFieldSchema(entry *configEntry) map[string]any { + prop := map[string]any{ + "type": w.getJSONType(entry.fieldType), + } + + // Add description + if entry.fieldDesc != "" { + prop["description"] = entry.fieldDesc + } + + // Add default value + if entry.fieldDefault != "" { + prop["default"] = w.parseDefaultValue(entry.fieldDefault, entry.fieldType) + } + + // Add CLI flag information + if entry.fieldFlag != "" { + prop["x-cli-flag"] = entry.fieldFlag + } + + // Add format hints based on type + switch entry.fieldType { + case "duration": + prop["x-format"] = "duration" + prop["type"] = "string" + case "url": + prop["format"] = "uri" + prop["type"] = "string" + case "time": + prop["format"] = "date-time" + prop["type"] = "string" + } + + // Handle list types + if strings.HasPrefix(entry.fieldType, "list of ") { + prop["type"] = "array" + itemType := strings.TrimPrefix(entry.fieldType, "list of ") + prop["items"] = map[string]any{ + "type": w.getJSONType(itemType), + } + } + + // Handle map types + if strings.HasPrefix(entry.fieldType, "map of ") { + prop["type"] = "object" + prop["additionalProperties"] = true + } + + // Mark required fields + if entry.required { + prop["x-required"] = true + } + + return prop +} + +func (w *JSONSchemaWriter) getJSONType(goType string) string { + switch goType { + case "string": + return "string" + case "int", "float": + return "number" + case "boolean": + return "boolean" + case "duration", "url", "time": + return "string" + default: + // Handle complex types + if strings.HasPrefix(goType, "list of ") { + return "array" + } + if strings.HasPrefix(goType, "map of ") { + return "object" + } + // Default to string for unknown types + return "string" + } +} + +func (w *JSONSchemaWriter) parseDefaultValue(defaultStr, goType string) any { + if defaultStr == "" { + return nil + } + + switch goType { + case "boolean": + return defaultStr == "true" + case "int": + if val, err := parseInt(defaultStr); err == nil { + return val + } + return defaultStr + case "float": + if val, err := parseFloat(defaultStr); err == nil { + return val + } + return defaultStr + default: + // Handle special cases + if defaultStr == "[]" { + return []any{} + } + if strings.HasPrefix(defaultStr, 
"[") && strings.HasSuffix(defaultStr, "]") { + // Try to parse as JSON array + var arr []any + if err := json.Unmarshal([]byte(defaultStr), &arr); err == nil { + return arr + } + } + return defaultStr + } +} + +// Helper functions for parsing +func parseInt(s string) (int64, error) { + var result int64 + var err error + if strings.Contains(s, "e+") || strings.Contains(s, "E+") { + return 0, fmt.Errorf("scientific notation not supported") + } + _, err = fmt.Sscanf(s, "%d", &result) + return result, err +} + +func parseFloat(s string) (float64, error) { + var result float64 + var err error + _, err = fmt.Sscanf(s, "%f", &result) + return result, err +} diff --git a/tools/doc-generator/main.go b/tools/doc-generator/main.go index 58a1787ee2..2e13d3c906 100644 --- a/tools/doc-generator/main.go +++ b/tools/doc-generator/main.go @@ -3,6 +3,7 @@ package main import ( "flag" "fmt" + "io" "os" "path/filepath" "reflect" @@ -313,11 +314,63 @@ func generateBlockMarkdown(blocks []*configBlock, blockName, fieldName string) s return "" } +func generateJSONSchemaMain(outputFile string) { + // Create a Cortex config instance + cfg := &cortex.Config{} + + // Parse CLI flags to map them with config fields + flags := parseFlags(cfg) + + // Parse the config structure + blocks, err := parseConfig(nil, cfg, flags, map[string]struct{}{}) + if err != nil { + fmt.Fprintf(os.Stderr, "Error parsing config: %s\n", err.Error()) + os.Exit(1) + } + + // Annotate the flags prefix for each root block + annotateFlagPrefix(blocks) + + // Generate JSON schema + var output io.Writer + if outputFile != "" { + file, err := os.Create(outputFile) + if err != nil { + fmt.Fprintf(os.Stderr, "Error creating file: %s\n", err.Error()) + os.Exit(1) + } + defer file.Close() + output = file + } else { + output = os.Stdout + } + + writer := NewJSONSchemaWriter(output) + err = writer.WriteSchema(blocks) + if err != nil { + fmt.Fprintf(os.Stderr, "Error writing JSON schema: %s\n", err.Error()) + os.Exit(1) + } + + if outputFile != "" { + fmt.Printf("JSON schema written to %s\n", outputFile) + } +} + func main() { // Parse the generator flags. 
+ jsonSchema := flag.Bool("json-schema", false, "Generate JSON schema instead of markdown documentation") + outputFile := flag.String("output", "", "Output file for schema (default: stdout)") flag.Parse() + + // If JSON schema generation is requested + if *jsonSchema { + generateJSONSchemaMain(*outputFile) + return + } + if flag.NArg() != 1 { - fmt.Fprintf(os.Stderr, "Usage: doc-generator template-file") + fmt.Fprintf(os.Stderr, "Usage: doc-generator [-json-schema] [-output file] template-file") os.Exit(1) } diff --git a/tools/doc-generator/parser.go b/tools/doc-generator/parser.go index 178799eefe..ecc8c4ba4a 100644 --- a/tools/doc-generator/parser.go +++ b/tools/doc-generator/parser.go @@ -73,7 +73,7 @@ func parseFlags(cfg flagext.Registerer) map[uintptr]*flag.Flag { return flags } -func parseConfig(block *configBlock, cfg interface{}, flags map[uintptr]*flag.Flag, addedRootBlocks map[string]struct{}) ([]*configBlock, error) { +func parseConfig(block *configBlock, cfg any, flags map[uintptr]*flag.Flag, addedRootBlocks map[string]struct{}) ([]*configBlock, error) { blocks := []*configBlock{} // If the input block is nil it means we're generating the doc for the top-level block @@ -517,7 +517,7 @@ func parseDocTag(f reflect.StructField) map[string]string { return cfg } - for _, entry := range strings.Split(tag, "|") { + for entry := range strings.SplitSeq(tag, "|") { parts := strings.SplitN(entry, "=", 2) switch len(parts) { diff --git a/tools/doc-generator/writer.go b/tools/doc-generator/writer.go index 0b8d6b64bc..c765cea642 100644 --- a/tools/doc-generator/writer.go +++ b/tools/doc-generator/writer.go @@ -90,9 +90,9 @@ func (w *specWriter) writeComment(comment string, indent int) { } wrapped := strings.TrimSpace(wordwrap.WrapString(comment, uint(maxLineWidth-indent-2))) - lines := strings.Split(wrapped, "\n") + lines := strings.SplitSeq(wrapped, "\n") - for _, line := range lines { + for line := range lines { w.out.WriteString(pad(indent) + "# " + line + "\n") } } diff --git a/tools/query-audit/auditor.go b/tools/query-audit/auditor.go index 17ff61c3db..7d66b750bf 100644 --- a/tools/query-audit/auditor.go +++ b/tools/query-audit/auditor.go @@ -36,7 +36,7 @@ func (a *Auditor) auditMatrix(x, y model.Matrix) (diff Diff, err error) { return diff, errors.Errorf("different # of series: control=%d, other=%d", len(x), len(y)) } - for i := 0; i < len(x); i++ { + for i := range x { xSeries, ySeries := x[i], y[i] if !xSeries.Metric.Equal(ySeries.Metric) { return diff, errors.Errorf("mismatched metrics: %v vs %v", xSeries.Metric, ySeries.Metric) @@ -52,7 +52,7 @@ func (a *Auditor) auditMatrix(x, y model.Matrix) (diff Diff, err error) { ) } - for j := 0; j < len(xVals); j++ { + for j := range xVals { xSample, ySample := xVals[j], yVals[j] if xSample.Timestamp != ySample.Timestamp { diff --git a/tools/querytee/proxy_endpoint.go b/tools/querytee/proxy_endpoint.go index 20083d0bd8..1a8adf7c37 100644 --- a/tools/querytee/proxy_endpoint.go +++ b/tools/querytee/proxy_endpoint.go @@ -81,7 +81,6 @@ func (p *ProxyEndpoint) executeBackendRequests(r *http.Request, resCh chan *back wg.Add(len(p.backends)) for _, b := range p.backends { - b := b go func() { defer wg.Done() diff --git a/tools/querytee/proxy_endpoint_test.go b/tools/querytee/proxy_endpoint_test.go index 6cc2c669b1..7fa2225a1e 100644 --- a/tools/querytee/proxy_endpoint_test.go +++ b/tools/querytee/proxy_endpoint_test.go @@ -86,7 +86,6 @@ func Test_ProxyEndpoint_waitBackendResponseForDownstream(t *testing.T) { } for testName, testData := range tests 
{ - testData := testData t.Run(testName, func(t *testing.T) { endpoint := NewProxyEndpoint(testData.backends, "test", NewProxyMetrics(nil), log.NewNopLogger(), nil) diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md index 500c34cf44..66131916eb 100644 --- a/vendor/cloud.google.com/go/auth/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/CHANGES.md @@ -1,5 +1,34 @@ # Changelog +## [0.16.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.16.1...auth/v0.16.2) (2025-06-04) + + +### Bug Fixes + +* **auth:** Add back DirectPath misconfiguration logging ([#11162](https://github.com/googleapis/google-cloud-go/issues/11162)) ([8d52da5](https://github.com/googleapis/google-cloud-go/commit/8d52da58da5a0ed77a0f6307d1b561bc045406a1)) +* **auth:** Remove s2a fallback option ([#12354](https://github.com/googleapis/google-cloud-go/issues/12354)) ([d5acc59](https://github.com/googleapis/google-cloud-go/commit/d5acc599cd775ddc404349e75906fa02e8ff133e)) + +## [0.16.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.16.0...auth/v0.16.1) (2025-04-23) + + +### Bug Fixes + +* **auth:** Clone detectopts before assigning TokenBindingType ([#11881](https://github.com/googleapis/google-cloud-go/issues/11881)) ([2167b02](https://github.com/googleapis/google-cloud-go/commit/2167b020fdc43b517c2b6ecca264a10e357ea035)) + +## [0.16.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.15.0...auth/v0.16.0) (2025-04-14) + + +### Features + +* **auth/credentials:** Return X.509 certificate chain as subject token ([#11948](https://github.com/googleapis/google-cloud-go/issues/11948)) ([d445a3f](https://github.com/googleapis/google-cloud-go/commit/d445a3f66272ffd5c39c4939af9bebad4582631c)), refs [#11757](https://github.com/googleapis/google-cloud-go/issues/11757) +* **auth:** Configure DirectPath bound credentials from AllowedHardBoundTokens ([#11665](https://github.com/googleapis/google-cloud-go/issues/11665)) ([0fc40bc](https://github.com/googleapis/google-cloud-go/commit/0fc40bcf4e4673704df0973e9fa65957395d7bb4)) + + +### Bug Fixes + +* **auth:** Allow non-default SA credentials for DP ([#11828](https://github.com/googleapis/google-cloud-go/issues/11828)) ([3a996b4](https://github.com/googleapis/google-cloud-go/commit/3a996b4129e6d0a34dfda6671f535d5aefb26a82)) +* **auth:** Restore calling DialContext ([#11930](https://github.com/googleapis/google-cloud-go/issues/11930)) ([9ec9a29](https://github.com/googleapis/google-cloud-go/commit/9ec9a29494e93197edbaf45aba28984801e9770a)), refs [#11118](https://github.com/googleapis/google-cloud-go/issues/11118) + ## [0.15.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.14.1...auth/v0.15.0) (2025-02-19) diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go index a822064234..f4f49f175d 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go @@ -413,7 +413,10 @@ func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) { if cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation != "" { return nil, errors.New("credentials: \"certificate\" object cannot specify both a certificate_config_location and use_default_certificate_config=true") } - return &x509Provider{}, nil + return &x509Provider{ + 
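+ // Wire through the two paths subjectToken needs: the certificate config file
+ // (to locate the leaf certificate) and the optional trust chain file.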
TrustChainPath: o.CredentialSource.Certificate.TrustChainPath, + ConfigFilePath: o.CredentialSource.Certificate.CertificateConfigLocation, + }, nil } return nil, errors.New("credentials: unable to parse credential source") } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go index 115df5881f..d86ca593c8 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go @@ -17,27 +17,184 @@ package externalaccount import ( "context" "crypto/tls" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io/fs" "net/http" + "os" + "strings" "time" "cloud.google.com/go/auth/internal/transport/cert" ) -// x509Provider implements the subjectTokenProvider type for -// x509 workload identity credentials. Because x509 credentials -// rely on an mTLS connection to represent the 3rd party identity -// rather than a subject token, this provider will always return -// an empty string when a subject token is requested by the external account -// token provider. +// x509Provider implements the subjectTokenProvider type for x509 workload +// identity credentials. This provider retrieves and formats a JSON array +// containing the leaf certificate and trust chain (if provided) as +// base64-encoded strings. This JSON array serves as the subject token for +// mTLS authentication. type x509Provider struct { + // TrustChainPath is the path to the file containing the trust chain certificates. + // The file should contain one or more PEM-encoded certificates. + TrustChainPath string + // ConfigFilePath is the path to the configuration file containing the path + // to the leaf certificate file. + ConfigFilePath string } +const pemCertificateHeader = "-----BEGIN CERTIFICATE-----" + func (xp *x509Provider) providerType() string { return x509ProviderType } -func (xp *x509Provider) subjectToken(ctx context.Context) (string, error) { - return "", nil +// loadLeafCertificate loads and parses the leaf certificate from the specified +// configuration file. It retrieves the certificate path from the config file, +// reads the certificate file, and parses the certificate data. +func loadLeafCertificate(configFilePath string) (*x509.Certificate, error) { + // Get the path to the certificate file from the configuration file. + path, err := cert.GetCertificatePath(configFilePath) + if err != nil { + return nil, fmt.Errorf("failed to get certificate path from config file: %w", err) + } + leafCertBytes, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("failed to read leaf certificate file: %w", err) + } + // Parse the certificate bytes. + return parseCertificate(leafCertBytes) +} + +// encodeCert encodes a x509.Certificate to a base64 string. +func encodeCert(cert *x509.Certificate) string { + // cert.Raw contains the raw DER-encoded certificate. Encode the raw certificate bytes to base64. + return base64.StdEncoding.EncodeToString(cert.Raw) +} + +// parseCertificate parses a PEM-encoded certificate from the given byte slice. +func parseCertificate(certData []byte) (*x509.Certificate, error) { + if len(certData) == 0 { + return nil, errors.New("invalid certificate data: empty input") + } + // Decode the PEM-encoded data. 
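+ // pem.Decode returns the first PEM block found along with the remaining
+ // input; any data after the first block is ignored here.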
+ block, _ := pem.Decode(certData) + if block == nil { + return nil, errors.New("invalid PEM-encoded certificate data: no PEM block found") + } + if block.Type != "CERTIFICATE" { + return nil, fmt.Errorf("invalid PEM-encoded certificate data: expected CERTIFICATE block type, got %s", block.Type) + } + // Parse the DER-encoded certificate. + certificate, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %w", err) + } + return certificate, nil +} + +// readTrustChain reads a file of PEM-encoded X.509 certificates and returns a slice of parsed certificates. +// It splits the file content into PEM certificate blocks and parses each one. +func readTrustChain(trustChainPath string) ([]*x509.Certificate, error) { + certificateTrustChain := []*x509.Certificate{} + + // If no trust chain path is provided, return an empty slice. + if trustChainPath == "" { + return certificateTrustChain, nil + } + + // Read the trust chain file. + trustChainData, err := os.ReadFile(trustChainPath) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return nil, fmt.Errorf("trust chain file not found: %w", err) + } + return nil, fmt.Errorf("failed to read trust chain file: %w", err) + } + + // Split the file content into PEM certificate blocks. + certBlocks := strings.Split(string(trustChainData), pemCertificateHeader) + + // Iterate over each certificate block. + for _, certBlock := range certBlocks { + // Trim whitespace from the block. + certBlock = strings.TrimSpace(certBlock) + + if certBlock != "" { + // Add the PEM header to the block. + certData := pemCertificateHeader + "\n" + certBlock + + // Parse the certificate data. + cert, err := parseCertificate([]byte(certData)) + if err != nil { + return nil, fmt.Errorf("error parsing certificate from trust chain file: %w", err) + } + + // Append the certificate to the trust chain. + certificateTrustChain = append(certificateTrustChain, cert) + } + } + + return certificateTrustChain, nil +} + +// subjectToken retrieves the X.509 subject token. It loads the leaf +// certificate and, if a trust chain path is configured, the trust chain +// certificates. It then constructs a JSON array containing the base64-encoded +// leaf certificate and each base64-encoded certificate in the trust chain. +// The leaf certificate must be at the top of the trust chain file. This JSON +// array is used as the subject token for mTLS authentication. +func (xp *x509Provider) subjectToken(context.Context) (string, error) { + // Load the leaf certificate. + leafCert, err := loadLeafCertificate(xp.ConfigFilePath) + if err != nil { + return "", fmt.Errorf("failed to load leaf certificate: %w", err) + } + + // Read the trust chain. + trustChain, err := readTrustChain(xp.TrustChainPath) + if err != nil { + return "", fmt.Errorf("failed to read trust chain: %w", err) + } + + // Initialize the certificate chain with the leaf certificate. + certChain := []string{encodeCert(leafCert)} + + // If there is a trust chain, add certificates to the certificate chain. + if len(trustChain) > 0 { + firstCert := encodeCert(trustChain[0]) + + // If the first certificate in the trust chain is not the same as the leaf certificate, add it to the chain. + if firstCert != certChain[0] { + certChain = append(certChain, firstCert) + } + + // Iterate over the remaining certificates in the trust chain. 
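+ // The leaf may optionally appear as the first entry of the trust chain file
+ // (deduplicated above); the leaf appearing anywhere deeper is rejected below.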
+ for i := 1; i < len(trustChain); i++ { + encoded := encodeCert(trustChain[i]) + + // Return an error if the current certificate is the same as the leaf certificate. + if encoded == certChain[0] { + return "", errors.New("the leaf certificate must be at the top of the trust chain file") + } + + // Add the current certificate to the chain. + certChain = append(certChain, encoded) + } + } + + // Convert the certificate chain to a JSON array of base64-encoded strings. + jsonChain, err := json.Marshal(certChain) + if err != nil { + return "", fmt.Errorf("failed to format certificate data: %w", err) + } + + // Return the JSON-formatted certificate chain. + return string(jsonChain), nil + } // createX509Client creates a new client that is configured with mTLS, using the diff --git a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go index c541da2b1a..69d6d0034e 100644 --- a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go +++ b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go @@ -20,13 +20,18 @@ import ( "os" "strconv" "strings" + "time" "cloud.google.com/go/auth" + "cloud.google.com/go/auth/credentials" "cloud.google.com/go/auth/internal/compute" + "golang.org/x/time/rate" "google.golang.org/grpc" grpcgoogle "google.golang.org/grpc/credentials/google" ) +var logRateLimiter = rate.Sometimes{Interval: 1 * time.Second} + func isDirectPathEnabled(endpoint string, opts *Options) bool { if opts.InternalOptions != nil && !opts.InternalOptions.EnableDirectPath { return false @@ -97,14 +102,36 @@ func isDirectPathXdsUsed(o *Options) bool { return false } +func isDirectPathBoundTokenEnabled(opts *InternalOptions) bool { + for _, ev := range opts.AllowHardBoundTokens { + if ev == "ALTS" { + return true + } + } + return false +} + // configureDirectPath returns some dial options and an endpoint to use if the // configuration allows the use of direct path. If it does not the provided // grpcOpts and endpoint are returned. -func configureDirectPath(grpcOpts []grpc.DialOption, opts *Options, endpoint string, creds *auth.Credentials) ([]grpc.DialOption, string) { +func configureDirectPath(grpcOpts []grpc.DialOption, opts *Options, endpoint string, creds *auth.Credentials) ([]grpc.DialOption, string, error) { + logRateLimiter.Do(func() { + logDirectPathMisconfig(endpoint, creds, opts) + }) if isDirectPathEnabled(endpoint, opts) && compute.OnComputeEngine() && isTokenProviderDirectPathCompatible(creds, opts) { // Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates. 
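+ // If ALTS hard-bound tokens are allowed and the token provider is the GCE
+ // metadata server, a second, ALTS-bound credential is detected below and
+ // attached alongside the regular per-RPC credentials.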
+ defaultCredetialsOptions := grpcgoogle.DefaultCredentialsOptions{PerRPCCreds: &grpcCredentialsProvider{creds: creds}} + if isDirectPathBoundTokenEnabled(opts.InternalOptions) && isTokenProviderComputeEngine(creds) { + optsClone := opts.resolveDetectOptions() + optsClone.TokenBindingType = credentials.ALTSHardBinding + altsCreds, err := credentials.DetectDefault(optsClone) + if err != nil { + return nil, "", err + } + defaultCredetialsOptions.ALTSPerRPCCreds = &grpcCredentialsProvider{creds: altsCreds} + } grpcOpts = []grpc.DialOption{ - grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{PerRPCCreds: &grpcCredentialsProvider{creds: creds}}))} + grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(defaultCredetialsOptions))} if timeoutDialerOption != nil { grpcOpts = append(grpcOpts, timeoutDialerOption) } @@ -129,5 +156,22 @@ func configureDirectPath(grpcOpts []grpc.DialOption, opts *Options, endpoint str } // TODO: add support for system parameters (quota project, request reason) via chained interceptor. } - return grpcOpts, endpoint + return grpcOpts, endpoint, nil +} + +func logDirectPathMisconfig(endpoint string, creds *auth.Credentials, o *Options) { + + // Case 1: does not enable DirectPath + if !isDirectPathEnabled(endpoint, o) { + o.logger().Warn("DirectPath is disabled. To enable, please set the EnableDirectPath option along with the EnableDirectPathXds option.") + } else { + // Case 2: credential is not correctly set + if !isTokenProviderDirectPathCompatible(creds, o) { + o.logger().Warn("DirectPath is disabled. Please make sure the token source is fetched from GCE metadata server and the default service account is used.") + } + // Case 3: not running on GCE + if !compute.OnComputeEngine() { + o.logger().Warn("DirectPath is disabled. DirectPath is only available in a GCE environment.") + } + } } diff --git a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go index 4610a48551..834aef41c8 100644 --- a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go +++ b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go @@ -304,17 +304,18 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er // This condition is only met for non-DirectPath clients because // TransportTypeMTLSS2A is used only when InternalOptions.EnableDirectPath // is false. + optsClone := opts.resolveDetectOptions() if transportCreds.TransportType == transport.TransportTypeMTLSS2A { // Check that the client allows requesting hard-bound token for the transport type mTLS using S2A. 
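+ // The binding type is set on the resolved copy (optsClone) rather than on
+ // opts.DetectOpts itself, per the v0.16.1 fix "Clone detectopts before
+ // assigning TokenBindingType" noted in CHANGES.md above.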
for _, ev := range opts.InternalOptions.AllowHardBoundTokens { if ev == "MTLS_S2A" { - opts.DetectOpts.TokenBindingType = credentials.MTLSHardBinding + optsClone.TokenBindingType = credentials.MTLSHardBinding break } } } var err error - creds, err = credentials.DetectDefault(opts.resolveDetectOptions()) + creds, err = credentials.DetectDefault(optsClone) if err != nil { return nil, err } @@ -341,7 +342,10 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er }), ) // Attempt Direct Path - grpcOpts, transportCreds.Endpoint = configureDirectPath(grpcOpts, opts, transportCreds.Endpoint, creds) + grpcOpts, transportCreds.Endpoint, err = configureDirectPath(grpcOpts, opts, transportCreds.Endpoint, creds) + if err != nil { + return nil, err + } } // Add tracing, but before the other options, so that clients can override the @@ -350,7 +354,7 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er grpcOpts = addOpenTelemetryStatsHandler(grpcOpts, opts) grpcOpts = append(grpcOpts, opts.GRPCDialOpts...) - return grpc.Dial(transportCreds.Endpoint, grpcOpts...) + return grpc.DialContext(ctx, transportCreds.Endpoint, grpcOpts...) } // grpcKeyProvider satisfies https://pkg.go.dev/google.golang.org/grpc/credentials#PerRPCCredentials. diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go index 3be6e5bbb4..606347304c 100644 --- a/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go +++ b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go @@ -127,6 +127,7 @@ type ExecutableConfig struct { type CertificateConfig struct { UseDefaultCertificateConfig bool `json:"use_default_certificate_config"` CertificateConfigLocation string `json:"certificate_config_location"` + TrustChainPath string `json:"trust_chain_path"` } // ServiceAccountImpersonationInfo has impersonation configuration. diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/vendor/cloud.google.com/go/auth/internal/transport/cba.go index b1f0fcf937..14bca966ec 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cba.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cba.go @@ -31,7 +31,6 @@ import ( "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/transport/cert" "github.com/google/s2a-go" - "github.com/google/s2a-go/fallback" "google.golang.org/grpc/credentials" ) @@ -170,18 +169,9 @@ func GetGRPCTransportCredsAndEndpoint(opts *Options) (*GRPCTransportCredentials, return &GRPCTransportCredentials{defaultTransportCreds, config.endpoint, TransportTypeUnknown}, nil } - var fallbackOpts *s2a.FallbackOptions - // In case of S2A failure, fall back to the endpoint that would've been used without S2A. - if fallbackHandshake, err := fallback.DefaultFallbackClientHandshakeFunc(config.endpoint); err == nil { - fallbackOpts = &s2a.FallbackOptions{ - FallbackClientHandshakeFunc: fallbackHandshake, - } - } - s2aTransportCreds, err := s2a.NewClientCreds(&s2a.ClientOptions{ S2AAddress: s2aAddr, TransportCreds: transportCredsForS2A, - FallbackOpts: fallbackOpts, }) if err != nil { // Use default if we cannot initialize S2A client transport credentials. @@ -218,23 +208,9 @@ func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context, return config.clientCertSource, nil, nil } - var fallbackOpts *s2a.FallbackOptions - // In case of S2A failure, fall back to the endpoint that would've been used without S2A. 
- if fallbackURL, err := url.Parse(config.endpoint); err == nil { - if fallbackDialer, fallbackServerAddr, err := fallback.DefaultFallbackDialerAndAddress(fallbackURL.Hostname()); err == nil { - fallbackOpts = &s2a.FallbackOptions{ - FallbackDialer: &s2a.FallbackDialer{ - Dialer: fallbackDialer, - ServerAddr: fallbackServerAddr, - }, - } - } - } - dialTLSContextFunc := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{ S2AAddress: s2aAddr, TransportCreds: transportCredsForS2A, - FallbackOpts: fallbackOpts, }) return nil, dialTLSContextFunc, nil } diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go index 347aaced72..b2a3be23c7 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go @@ -37,6 +37,36 @@ type certificateConfig struct { CertConfigs certConfigs `json:"cert_configs"` } +// getconfigFilePath determines the path to the certificate configuration file. +// It first checks for the presence of an environment variable that specifies +// the file path. If the environment variable is not set, it falls back to +// a default configuration file path. +func getconfigFilePath() string { + envFilePath := util.GetConfigFilePathFromEnv() + if envFilePath != "" { + return envFilePath + } + return util.GetDefaultConfigFilePath() + +} + +// GetCertificatePath retrieves the certificate file path from the provided +// configuration file. If the configFilePath is empty, it attempts to load +// the configuration from a well-known gcloud location. +// This function is exposed to allow other packages, such as the +// externalaccount package, to retrieve the certificate path without needing +// to load the entire certificate configuration. +func GetCertificatePath(configFilePath string) (string, error) { + if configFilePath == "" { + configFilePath = getconfigFilePath() + } + certFile, _, err := getCertAndKeyFiles(configFilePath) + if err != nil { + return "", err + } + return certFile, nil +} + // NewWorkloadX509CertProvider creates a certificate source // that reads a certificate and private key file from the local file system. // This is intended to be used for workload identity federation. @@ -47,14 +77,8 @@ type certificateConfig struct { // a well-known gcloud location. 
func NewWorkloadX509CertProvider(configFilePath string) (Provider, error) { if configFilePath == "" { - envFilePath := util.GetConfigFilePathFromEnv() - if envFilePath != "" { - configFilePath = envFilePath - } else { - configFilePath = util.GetDefaultConfigFilePath() - } + configFilePath = getconfigFilePath() } - certFile, keyFile, err := getCertAndKeyFiles(configFilePath) if err != nil { return nil, err diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md index 6bfd910506..7839f3b895 100644 --- a/vendor/cloud.google.com/go/iam/CHANGES.md +++ b/vendor/cloud.google.com/go/iam/CHANGES.md @@ -1,6 +1,50 @@ # Changes +## [1.5.2](https://github.com/googleapis/google-cloud-go/compare/iam/v1.5.1...iam/v1.5.2) (2025-04-15) + + +### Bug Fixes + +* **iam:** Update google.golang.org/api to 0.229.0 ([3319672](https://github.com/googleapis/google-cloud-go/commit/3319672f3dba84a7150772ccb5433e02dab7e201)) + +## [1.5.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.5.0...iam/v1.5.1) (2025-04-15) + + +### Documentation + +* **iam:** Formatting update for ListPolicyBindingsRequest ([dfdf404](https://github.com/googleapis/google-cloud-go/commit/dfdf404138728724aa6305c5c465ecc6fe5b1264)) +* **iam:** Minor doc update for ListPrincipalAccessBoundaryPoliciesResponse ([20f762c](https://github.com/googleapis/google-cloud-go/commit/20f762c528726a3f038d3e1f37e8a4952118badf)) +* **iam:** Minor doc update for ListPrincipalAccessBoundaryPoliciesResponse ([20f762c](https://github.com/googleapis/google-cloud-go/commit/20f762c528726a3f038d3e1f37e8a4952118badf)) + +## [1.5.0](https://github.com/googleapis/google-cloud-go/compare/iam/v1.4.2...iam/v1.5.0) (2025-03-31) + + +### Features + +* **iam:** New client(s) ([#11933](https://github.com/googleapis/google-cloud-go/issues/11933)) ([d5cb2e5](https://github.com/googleapis/google-cloud-go/commit/d5cb2e58334c6963cc46885f565fe3b19c52cb63)) + +## [1.4.2](https://github.com/googleapis/google-cloud-go/compare/iam/v1.4.1...iam/v1.4.2) (2025-03-13) + + +### Bug Fixes + +* **iam:** Update golang.org/x/net to 0.37.0 ([1144978](https://github.com/googleapis/google-cloud-go/commit/11449782c7fb4896bf8b8b9cde8e7441c84fb2fd)) + +## [1.4.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.4.0...iam/v1.4.1) (2025-03-06) + + +### Bug Fixes + +* **iam:** Fix out-of-sync version.go ([28f0030](https://github.com/googleapis/google-cloud-go/commit/28f00304ebb13abfd0da2f45b9b79de093cca1ec)) + +## [1.4.0](https://github.com/googleapis/google-cloud-go/compare/iam/v1.3.1...iam/v1.4.0) (2025-02-12) + + +### Features + +* **iam/admin:** Regenerate client ([#11570](https://github.com/googleapis/google-cloud-go/issues/11570)) ([eab87d7](https://github.com/googleapis/google-cloud-go/commit/eab87d73bea884c636ec88f03b9aa90102a2833f)), refs [#8219](https://github.com/googleapis/google-cloud-go/issues/8219) + ## [1.3.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.3.0...iam/v1.3.1) (2025-01-02) diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go index f975d76191..2b57ae3b82 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
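The x509 workload identity change above (vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go) replaces the previously empty subject token with a JSON array of base64-encoded DER certificates, leaf first. The sketch below shows that encoding round-trip using only the Go standard library; it is illustrative only, with throwaway names, and is not part of the vendored code:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"log"
	"math/big"
	"time"
)

func main() {
	// Generate a throwaway self-signed certificate to stand in for the leaf.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "workload-leaf"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(time.Hour),
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		log.Fatal(err)
	}

	// Encode the chain the way subjectToken does: a JSON array of
	// base64-encoded DER certificates, leaf first.
	token, err := json.Marshal([]string{base64.StdEncoding.EncodeToString(der)})
	if err != nil {
		log.Fatal(err)
	}

	// Consume the token: reverse both steps and parse each certificate.
	var encoded []string
	if err := json.Unmarshal(token, &encoded); err != nil {
		log.Fatal(err)
	}
	for _, e := range encoded {
		raw, err := base64.StdEncoding.DecodeString(e)
		if err != nil {
			log.Fatal(err)
		}
		cert, err := x509.ParseCertificate(raw)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(cert.Subject.CommonName) // prints "workload-leaf"
	}
}

A consumer of the token reverses the same two steps: JSON-decode the array, then base64-decode and parse each entry.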
diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go index 0c82db752b..745de05ba2 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go index a2e42f8786..0eba150896 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/resource_policy_member.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/resource_policy_member.pb.go index 361d79752a..c3339e26c4 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/resource_policy_member.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/resource_policy_member.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json index b1a50e8738..d72e823299 100644 --- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -959,16 +959,6 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/dataform/apiv1alpha2": { - "api_shortname": "dataform", - "distribution_name": "cloud.google.com/go/dataform/apiv1alpha2", - "description": "Dataform API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataform/latest/apiv1alpha2", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, "cloud.google.com/go/dataform/apiv1beta1": { "api_shortname": "dataform", "distribution_name": "cloud.google.com/go/dataform/apiv1beta1", @@ -1299,6 +1289,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/financialservices/apiv1": { + "api_shortname": "financialservices", + "distribution_name": "cloud.google.com/go/financialservices/apiv1", + "description": "Financial Services API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/financialservices/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/firestore": { "api_shortname": "firestore", "distribution_name": "cloud.google.com/go/firestore", @@ -1789,6 +1789,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/modelarmor/apiv1": { + "api_shortname": "modelarmor", + "distribution_name": "cloud.google.com/go/modelarmor/apiv1", + "description": "Model Armor API", + "language": "go", + "client_library_type": "generated", + "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/modelarmor/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/monitoring/apiv3/v2": { "api_shortname": "monitoring", "distribution_name": "cloud.google.com/go/monitoring/apiv3/v2", @@ -2269,16 +2279,6 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/resourcesettings/apiv1": { - "api_shortname": "resourcesettings", - "distribution_name": "cloud.google.com/go/resourcesettings/apiv1", - "description": "Resource Settings API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/resourcesettings/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, "cloud.google.com/go/retail/apiv2": { "api_shortname": "retail", "distribution_name": "cloud.google.com/go/retail/apiv2", diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go index 222e1d170a..24ca1414bb 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go index 02103f8cd4..ba0c4f65f2 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go index e301262a2f..81b8c8f5e4 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go index 0dbf58e435..0c3ac5a1c8 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go index 11d1a62d35..c35046ac71 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go index 3cfa112bb4..fbdf9ef54f 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go index 1961a1e3a5..ae7eea5b6f 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go index 9e7cbcdd2f..39b9595241 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go index 5fd4f33807..e03d89efe4 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go index 48d69d1431..0d5cacbecb 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
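Looking ahead to the azidentity hunks further below (azure_cli_credential.go and azure_developer_cli_credential.go): the indefinite-wait fix sets exec.Cmd.WaitDelay so a child process that exits without closing stdout cannot hang the credential, and it tolerates exec.ErrWaitDelay when stdout already carries usable output. A minimal sketch of that pattern, assuming Go 1.20+ (which introduced WaitDelay and ErrWaitDelay) and using echo as a stand-in for the az/azd binaries:

    package main

    import (
    	"bytes"
    	"context"
    	"errors"
    	"fmt"
    	"os/exec"
    	"time"
    )

    // run mimics the fixed token providers: WaitDelay bounds how long Wait
    // blocks on the child's inherited pipes, and ErrWaitDelay is tolerated
    // when stdout already holds output that may contain a token.
    func run(ctx context.Context) ([]byte, error) {
    	cmd := exec.CommandContext(ctx, "echo", "fake-token") // stand-in for az/azd
    	var stderr bytes.Buffer
    	cmd.Stderr = &stderr
    	cmd.WaitDelay = 100 * time.Millisecond

    	stdout, err := cmd.Output()
    	if errors.Is(err, exec.ErrWaitDelay) && len(stdout) > 0 {
    		// Child exited without closing stdout; keep what it wrote.
    		return stdout, nil
    	}
    	if err != nil {
    		return nil, fmt.Errorf("%s: %w", stderr.String(), err)
    	}
    	return stdout, nil
    }

    func main() {
    	out, err := run(context.Background())
    	fmt.Printf("out=%q err=%v\n", out, err)
    }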
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go index 9ae6580b1b..fd0230036d 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go index b1f18a6d25..6402f18ca1 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go index aa462351d7..a9d2ae8cb6 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go index 01520d88a2..08c2e08e26 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go index ef7fbded0c..861e045f2d 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
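Similarly, the azcore v1.18.1 hunks below fix a data race in ResourceID.String() by moving string construction out of a lazily memoizing String() and into init(), so the cached value is written exactly once before the ID is shared. A toy sketch of the resulting pattern, with types and names invented here rather than taken from azcore:

    package main

    import (
    	"fmt"
    	"sync"
    )

    // id caches its textual form eagerly: stringValue is written once during
    // construction and never again, so concurrent String() calls cannot race.
    type id struct {
    	parent      *id
    	name        string
    	stringValue string
    }

    func newID(parent *id, name string) *id {
    	v := &id{parent: parent, name: name}
    	if parent != nil {
    		v.stringValue = parent.String() + "/" + name
    	}
    	return v
    }

    func (v *id) String() string { return v.stringValue }

    func main() {
    	rg := newID(newID(&id{}, "subscriptions"), "myRg")
    	var wg sync.WaitGroup
    	for i := 0; i < 4; i++ {
    		wg.Add(1)
    		go func() { defer wg.Done(); _ = rg.String() }() // safe: read-only
    	}
    	wg.Wait()
    	fmt.Println(rg.String()) // /subscriptions/myRg
    }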
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go index bfe661ea70..c562d60bcc 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go index 3555d6e0a1..23f42835f1 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go index 7e122ade52..f303ac2515 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go index d2958b8658..9ea159bbd2 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/internal/version.go b/vendor/cloud.google.com/go/monitoring/internal/version.go index 291a237fe1..e199c1168a 100644 --- a/vendor/cloud.google.com/go/monitoring/internal/version.go +++ b/vendor/cloud.google.com/go/monitoring/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.24.0" +const Version = "1.24.2" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index 926ed3882c..d99d530934 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,12 +1,18 @@ # Release History +## 1.18.1 (2025-07-10) + +### Bugs Fixed + +* Fixed incorrect request/response logging try info when logging a request that's being retried. 
+* Fixed a data race in `ResourceID.String()` + ## 1.18.0 (2025-04-03) ### Features Added * Added `AccessToken.RefreshOn` and updated `BearerTokenPolicy` to consider nonzero values of it when deciding whether to request a new token - ## 1.17.1 (2025-03-20) ### Other Changes diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go index d9a4e36dcc..a08d3d0ffa 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go @@ -27,7 +27,8 @@ var RootResourceID = &ResourceID{ } // ResourceID represents a resource ID such as `/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRg`. -// Don't create this type directly, use ParseResourceID instead. +// Don't create this type directly, use [ParseResourceID] instead. Fields are considered immutable and shouldn't be +// modified after creation. type ResourceID struct { // Parent is the parent ResourceID of this instance. // Can be nil if there is no parent. @@ -85,28 +86,6 @@ func ParseResourceID(id string) (*ResourceID, error) { // String returns the string of the ResourceID func (id *ResourceID) String() string { - if len(id.stringValue) > 0 { - return id.stringValue - } - - if id.Parent == nil { - return "" - } - - builder := strings.Builder{} - builder.WriteString(id.Parent.String()) - - if id.isChild { - builder.WriteString(fmt.Sprintf("/%s", id.ResourceType.lastType())) - if len(id.Name) > 0 { - builder.WriteString(fmt.Sprintf("/%s", id.Name)) - } - } else { - builder.WriteString(fmt.Sprintf("/providers/%s/%s/%s", id.ResourceType.Namespace, id.ResourceType.Type, id.Name)) - } - - id.stringValue = builder.String() - return id.stringValue } @@ -185,6 +164,15 @@ func (id *ResourceID) init(parent *ResourceID, resourceType ResourceType, name s id.isChild = isChild id.ResourceType = resourceType id.Name = name + id.stringValue = id.Parent.String() + if id.isChild { + id.stringValue += "/" + id.ResourceType.lastType() + if id.Name != "" { + id.stringValue += "/" + id.Name + } + } else { + id.stringValue += fmt.Sprintf("/providers/%s/%s/%s", id.ResourceType.Namespace, id.ResourceType.Type, id.Name) + } } func appendNext(parent *ResourceID, parts []string, id string) (*ResourceID, error) { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml index 99348527b5..b81b621038 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml @@ -27,3 +27,5 @@ extends: template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml parameters: ServiceDirectory: azcore + TriggeringPaths: + - /eng/ diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go index e3e2d4e588..9b3f5badb5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go @@ -71,7 +71,8 @@ func (ov opValues) get(value any) bool { // NewRequestFromRequest creates a new policy.Request with an existing *http.Request // Exported as runtime.NewRequestFromRequest(). 
func NewRequestFromRequest(req *http.Request) (*Request, error) { - policyReq := &Request{req: req} + // populate values so that the same instance is propagated across policies + policyReq := &Request{req: req, values: opValues{}} if req.Body != nil { // we can avoid a body copy here if the underlying stream is already a @@ -117,7 +118,8 @@ func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Reque if !(req.URL.Scheme == "http" || req.URL.Scheme == "https") { return nil, fmt.Errorf("unsupported protocol scheme %s", req.URL.Scheme) } - return &Request{req: req}, nil + // populate values so that the same instance is propagated across policies + return &Request{req: req, values: opValues{}}, nil } // Body returns the original body specified when the Request was created. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 85514db3b8..23788b14d9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -40,5 +40,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. - Version = "v1.18.0" + Version = "v1.18.1" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go index bb37a5efb4..368a2199e0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go @@ -103,7 +103,7 @@ type RetryOptions struct { // RetryDelay specifies the initial amount of delay to use before retrying an operation. // The value is used only if the HTTP response does not contain a Retry-After header. // The delay increases exponentially with each retry up to the maximum specified by MaxRetryDelay. - // The default value is four seconds. A value less than zero means no delay between retries. + // The default value is 800 milliseconds. A value less than zero means no delay between retries. RetryDelay time.Duration // MaxRetryDelay specifies the maximum delay allowed before retrying an operation. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md index f5bd8586b9..84e7941e4f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -1,5 +1,10 @@ # Release History +## 1.10.1 (2025-06-10) + +### Bugs Fixed +- `AzureCLICredential` and `AzureDeveloperCLICredential` could wait indefinitely for subprocess output + ## 1.10.0 (2025-05-14) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD index 2bda7f2a7f..da2094e36b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD @@ -27,6 +27,7 @@ Persistent caches are encrypted at rest using a mechanism that depends on the op | Linux | kernel key retention service (keyctl) | Cache data is lost on system shutdown because kernel keys are stored in memory. 
Depending on kernel compile options, data may also be lost on logout, or storage may be impossible because the key retention service isn't available. | | macOS | Keychain | Building requires cgo and native build tools. Keychain access requires a graphical session, so persistent caching isn't possible in a headless environment such as an SSH session (macOS as host). | | Windows | Data Protection API (DPAPI) | No specific limitations. | + Persistent caching requires encryption. When the required encryption facility is unusable, or the application is running on an unsupported OS, the persistent cache constructor returns an error. This doesn't mean that authentication is impossible, only that credentials can't persist authentication data and the application will need to reauthenticate the next time it runs. See the package documentation for examples showing how to configure persistent caching and access cached data for [users][user_example] and [service principals][sp_example]. ### Credentials supporting token caching diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md index 10a4009c37..91f4f05cc0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md @@ -219,7 +219,7 @@ azd auth token --output json --scope https://management.core.windows.net/.defaul | Error Message |Description| Mitigation | |---|---|---|
  • If your application runs on Azure Kubernetes Servide (AKS) or a cluster that has deployed the Azure Workload Identity admission webhook, check pod labels and service account configuration. See the [AKS documentation](https://learn.microsoft.com/azure/aks/workload-identity-deploy-cluster#disable-workload-identity) and [Azure Workload Identity troubleshooting guide](https://azure.github.io/azure-workload-identity/docs/troubleshooting.html) for more details.
  • If your application isn't running on AKS or your cluster hasn't deployed the Workload Identity admission webhook, set these values in `WorkloadIdentityCredentialOptions` +|no client ID/tenant ID/token file specified|Incomplete configuration|In most cases these values are provided via environment variables set by Azure Workload Identity.
    • If your application runs on Azure Kubernetes Service (AKS) or a cluster that has deployed the Azure Workload Identity admission webhook, check pod labels and service account configuration. See the [AKS documentation](https://learn.microsoft.com/azure/aks/workload-identity-deploy-cluster#disable-workload-identity) and [Azure Workload Identity troubleshooting guide](https://azure.github.io/azure-workload-identity/docs/troubleshooting.html) for more details.
    • If your application isn't running on AKS or your cluster hasn't deployed the Workload Identity admission webhook, set these values in `WorkloadIdentityCredentialOptions` ## Troubleshoot AzurePipelinesCredential authentication issues diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go index 36e359a099..0fd03f4563 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go @@ -148,8 +148,14 @@ var defaultAzTokenProvider azTokenProvider = func(ctx context.Context, scopes [] cliCmd.Env = os.Environ() var stderr bytes.Buffer cliCmd.Stderr = &stderr + cliCmd.WaitDelay = 100 * time.Millisecond - output, err := cliCmd.Output() + stdout, err := cliCmd.Output() + if errors.Is(err, exec.ErrWaitDelay) && len(stdout) > 0 { + // The child process wrote to stdout and exited without closing it. + // Swallow this error and return stdout because it may contain a token. + return stdout, nil + } if err != nil { msg := stderr.String() var exErr *exec.ExitError @@ -162,7 +168,7 @@ var defaultAzTokenProvider azTokenProvider = func(ctx context.Context, scopes [] return nil, newCredentialUnavailableError(credNameAzureCLI, msg) } - return output, nil + return stdout, nil } func (c *AzureCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go index 46d0b55192..1bd3720b64 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go @@ -130,7 +130,14 @@ var defaultAzdTokenProvider azdTokenProvider = func(ctx context.Context, scopes cliCmd.Env = os.Environ() var stderr bytes.Buffer cliCmd.Stderr = &stderr - output, err := cliCmd.Output() + cliCmd.WaitDelay = 100 * time.Millisecond + + stdout, err := cliCmd.Output() + if errors.Is(err, exec.ErrWaitDelay) && len(stdout) > 0 { + // The child process wrote to stdout and exited without closing it. + // Swallow this error and return stdout because it may contain a token. + return stdout, nil + } if err != nil { msg := stderr.String() var exErr *exec.ExitError @@ -144,7 +151,7 @@ var defaultAzdTokenProvider azdTokenProvider = func(ctx context.Context, scopes } return nil, newCredentialUnavailableError(credNameAzureDeveloperCLI, msg) } - return output, nil + return stdout, nil } func (c *AzureDeveloperCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go index e859fba3a0..2b767762fa 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -14,5 +14,5 @@ const ( module = "github.com/Azure/azure-sdk-for-go/sdk/" + component // Version is the semantic version (see http://semver.org) of this module. 
- version = "v1.10.0" + version = "v1.10.1" ) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go index a015cc5b20..3219517dab 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go @@ -6,6 +6,7 @@ import ( smithybearer "github.com/aws/smithy-go/auth/bearer" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" ) // HTTPClient provides the interface to provide custom HTTPClients. Generally @@ -192,6 +193,17 @@ type Config struct { // This variable is sourced from environment variable AWS_RESPONSE_CHECKSUM_VALIDATION or // the shared config profile attribute "response_checksum_validation". ResponseChecksumValidation ResponseChecksumValidation + + // Registry of HTTP interceptors. + Interceptors smithyhttp.InterceptorRegistry + + // Priority list of preferred auth scheme IDs. + AuthSchemePreference []string + + // ServiceOptions provides service specific configuration options that will be applied + // when constructing clients for specific services. Each callback function receives the service ID + // and the service's Options struct, allowing for dynamic configuration based on the service. + ServiceOptions []func(string, any) } // NewConfig returns a new Config pointer that can be chained with builder diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go index 8e930fc6f8..b72921f87b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -3,4 +3,4 @@ package aws // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.36.3" +const goModuleVersion = "1.38.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go index 52d59b04bf..5549922ab8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go @@ -260,7 +260,7 @@ func (r *Attempt) handleAttempt( // Get a retry token that will be released after the releaseRetryToken, retryTokenErr := r.retryer.GetRetryToken(ctx, err) if retryTokenErr != nil { - return out, attemptResult, nopRelease, retryTokenErr + return out, attemptResult, nopRelease, errors.Join(err, retryTokenErr) } //------------------------------ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go index 66aa2bd6ab..32875e0779 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go @@ -59,7 +59,7 @@ func (s *StreamSigner) GetSignature(ctx context.Context, headers, payload []byte prevSignature := s.prevSignature - st := v4Internal.NewSigningTime(signingTime) + st := v4Internal.NewSigningTime(signingTime.UTC()) sigKey := s.signingKeyDeriver.DeriveKey(s.credentials, s.service, s.region, st) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go new file mode 100644 index 0000000000..938cd14c1e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go @@ -0,0 +1,112 @@ +package awsutil + +import ( + "io" + "reflect" + "time" +) + +// Copy deeply copies a src structure 
to dst. Useful for copying request and +// response structures. +// +// Can copy between structs of different type, but will only copy fields which +// are assignable, and exist in both structs. Fields which are not assignable, +// or do not exist in both structs are ignored. +func Copy(dst, src interface{}) { + dstval := reflect.ValueOf(dst) + if !dstval.IsValid() { + panic("Copy dst cannot be nil") + } + + rcopy(dstval, reflect.ValueOf(src), true) +} + +// CopyOf returns a copy of src while also allocating the memory for dst. +// src must be a pointer type or this operation will fail. +func CopyOf(src interface{}) (dst interface{}) { + dsti := reflect.New(reflect.TypeOf(src).Elem()) + dst = dsti.Interface() + rcopy(dsti, reflect.ValueOf(src), true) + return +} + +// rcopy performs a recursive copy of values from the source to destination. +// +// root is used to skip certain aspects of the copy which are not valid +// for the root node of an object. +func rcopy(dst, src reflect.Value, root bool) { + if !src.IsValid() { + return + } + + switch src.Kind() { + case reflect.Ptr: + if _, ok := src.Interface().(io.Reader); ok { + if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { + dst.Elem().Set(src) + } else if dst.CanSet() { + dst.Set(src) + } + } else { + e := src.Type().Elem() + if dst.CanSet() && !src.IsNil() { + if _, ok := src.Interface().(*time.Time); !ok { + if dst.Kind() == reflect.String { + dst.SetString(e.String()) + } else { + dst.Set(reflect.New(e)) + } + } else { + tempValue := reflect.New(e) + tempValue.Elem().Set(src.Elem()) + // Sets time.Time's unexported values + dst.Set(tempValue) + } + } + if dst.Kind() != reflect.String && src.Elem().IsValid() { + // Keep the current root state since the depth hasn't changed + rcopy(dst.Elem(), src.Elem(), root) + } + } + case reflect.Struct: + t := dst.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + srcVal := src.FieldByName(name) + dstVal := dst.FieldByName(name) + if srcVal.IsValid() && dstVal.CanSet() { + rcopy(dstVal, srcVal, false) + } + } + case reflect.Slice: + if src.IsNil() { + break + } + + s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + dst.Set(s) + for i := 0; i < src.Len(); i++ { + rcopy(dst.Index(i), src.Index(i), false) + } + case reflect.Map: + if src.IsNil() { + break + } + + s := reflect.MakeMap(src.Type()) + dst.Set(s) + for _, k := range src.MapKeys() { + v := src.MapIndex(k) + v2 := reflect.New(v.Type()).Elem() + rcopy(v2, v, false) + dst.SetMapIndex(k, v2) + } + default: + // Assign the value if possible. If it's not assignable, the value would + // need to be converted and the impact of that may be unexpected, or is + // not compatible with the dst type. + if src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go new file mode 100644 index 0000000000..bcfe51a2b7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go @@ -0,0 +1,33 @@ +package awsutil + +import ( + "reflect" +) + +// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. +// In addition to this, this method will also dereference the input values if +// possible so the DeepEqual performed will not fail if one parameter is a +// pointer and the other is not. +// +// DeepEqual will not perform indirection of nested values of the input parameters.
+func DeepEqual(a, b interface{}) bool { + ra := reflect.Indirect(reflect.ValueOf(a)) + rb := reflect.Indirect(reflect.ValueOf(b)) + + if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { + // If the elements are both nil, and of the same type they are equal + // If they are of different types they are not equal + return reflect.TypeOf(a) == reflect.TypeOf(b) + } else if raValid != rbValid { + // Both values must be valid to be equal + return false + } + + // Special casing for strings as typed enumerations are string aliases + // but are not deep equal. + if ra.Kind() == reflect.String && rb.Kind() == reflect.String { + return ra.String() == rb.String() + } + + return reflect.DeepEqual(ra.Interface(), rb.Interface()) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go new file mode 100644 index 0000000000..1adecae6b9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go @@ -0,0 +1,131 @@ +package awsutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// Prettify returns the string representation of a value. +func Prettify(i interface{}) string { + var buf bytes.Buffer + prettify(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +// prettify will recursively walk value v to build a textual +// representation of the value. +func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { + isPtr := false + for v.Kind() == reflect.Ptr { + isPtr = true + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + strtype := v.Type().String() + if strtype == "time.Time" { + fmt.Fprintf(buf, "%s", v.Interface()) + break + } else if strings.HasPrefix(strtype, "io.") { + buf.WriteString("<buffer>") + break + } + + if isPtr { + buf.WriteRune('&') + } + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + prettify(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + strtype := v.Type().String() + if strtype == "[]uint8" { + fmt.Fprintf(buf, "<binary> len %d", v.Len()) + break + } + + nl, id, id2 := "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + if isPtr { + buf.WriteRune('&') + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + prettify(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + if isPtr { + buf.WriteRune('&') + } + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + prettify(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + if !v.IsValid() { + fmt.Fprint(buf, "<invalid value>") + return + } + + for v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + + if v.Kind() == reflect.Ptr || v.Kind() ==
reflect.Struct || v.Kind() == reflect.Map || v.Kind() == reflect.Slice { + prettify(v, indent, buf) + return + } + + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + case io.ReadSeeker, io.Reader: + format = "buffer(%p)" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go new file mode 100644 index 0000000000..645df2450f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go @@ -0,0 +1,88 @@ +package awsutil + +import ( + "bytes" + "fmt" + "reflect" + "strings" +) + +// StringValue returns the string representation of a value. +func StringValue(i interface{}) string { + var buf bytes.Buffer + stringValue(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + buf.WriteString("{\n") + + for i := 0; i < v.Type().NumField(); i++ { + ft := v.Type().Field(i) + fv := v.Field(i) + + if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) { + continue // ignore unexported fields + } + if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() { + continue // ignore unset fields + } + + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(ft.Name + ": ") + + if tag := ft.Tag.Get("sensitive"); tag == "true" { + buf.WriteString("<sensitive>") + } else { + stringValue(fv, indent+2, buf) + } + + buf.WriteString(",\n") + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + stringValue(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + stringValue(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md index eae3e16af7..e0ebf39032 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -1,3 +1,46 @@ +# v1.4.6 (2025-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.5 (2025-08-27) + +* **Dependency Update**: Update to smithy-go v1.23.0.
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.4 (2025-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.3 (2025-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.2 (2025-08-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2025-07-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2025-07-28) + +* **Feature**: Add support for HTTP interceptors. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.37 (2025-07-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.36 (2025-06-17) + +* **Dependency Update**: Update to smithy-go v1.22.4. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.35 (2025-06-10) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.3.34 (2025-02-27) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go index eddabe6344..3479c11c48 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go @@ -3,4 +3,4 @@ package configsources // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.3.34" +const goModuleVersion = "1.4.6" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go index 5f0779997d..d4e6611f74 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go @@ -11,7 +11,7 @@ func GetPartition(region string) *PartitionConfig { var partitions = []Partition{ { ID: "aws", - RegionRegex: "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$", + RegionRegex: "^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ Name: "aws", DnsSuffix: "amazonaws.com", @@ -35,6 +35,13 @@ var partitions = []Partition{ SupportsFIPS: nil, SupportsDualStack: nil, }, + "ap-east-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, "ap-northeast-1": { Name: nil, DnsSuffix: nil, @@ -98,6 +105,27 @@ var partitions = []Partition{ SupportsFIPS: nil, SupportsDualStack: nil, }, + "ap-southeast-5": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-southeast-6": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-southeast-7": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, "aws-global": { Name: nil, DnsSuffix: nil, @@ -196,6 +224,13 @@ var partitions = []Partition{ SupportsFIPS: nil, SupportsDualStack: nil, }, + "mx-central-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, "sa-east-1": { Name: nil, DnsSuffix: nil, @@ -269,32 +304,18 @@ var partitions = []Partition{ }, }, { - ID: "aws-us-gov", - RegionRegex: "^us\\-gov\\-\\w+\\-\\d+$", + ID: "aws-eusc", + RegionRegex: "^eusc\\-(de)\\-\\w+\\-\\d+$", DefaultConfig: 
PartitionConfig{ - Name: "aws-us-gov", - DnsSuffix: "amazonaws.com", - DualStackDnsSuffix: "api.aws", + Name: "aws-eusc", + DnsSuffix: "amazonaws.eu", + DualStackDnsSuffix: "api.amazonwebservices.eu", SupportsFIPS: true, SupportsDualStack: true, - ImplicitGlobalRegion: "us-gov-west-1", + ImplicitGlobalRegion: "eusc-de-east-1", }, Regions: map[string]RegionOverrides{ - "aws-us-gov-global": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-gov-east-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-gov-west-1": { + "eusc-de-east-1": { Name: nil, DnsSuffix: nil, DualStackDnsSuffix: nil, @@ -309,7 +330,7 @@ var partitions = []Partition{ DefaultConfig: PartitionConfig{ Name: "aws-iso", DnsSuffix: "c2s.ic.gov", - DualStackDnsSuffix: "c2s.ic.gov", + DualStackDnsSuffix: "api.aws.ic.gov", SupportsFIPS: true, SupportsDualStack: false, ImplicitGlobalRegion: "us-iso-east-1", @@ -344,7 +365,7 @@ var partitions = []Partition{ DefaultConfig: PartitionConfig{ Name: "aws-iso-b", DnsSuffix: "sc2s.sgov.gov", - DualStackDnsSuffix: "sc2s.sgov.gov", + DualStackDnsSuffix: "api.aws.scloud", SupportsFIPS: true, SupportsDualStack: false, ImplicitGlobalRegion: "us-isob-east-1", @@ -372,12 +393,19 @@ var partitions = []Partition{ DefaultConfig: PartitionConfig{ Name: "aws-iso-e", DnsSuffix: "cloud.adc-e.uk", - DualStackDnsSuffix: "cloud.adc-e.uk", + DualStackDnsSuffix: "api.cloud-aws.adc-e.uk", SupportsFIPS: true, SupportsDualStack: false, ImplicitGlobalRegion: "eu-isoe-west-1", }, Regions: map[string]RegionOverrides{ + "aws-iso-e-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, "eu-isoe-west-1": { Name: nil, DnsSuffix: nil, @@ -393,11 +421,68 @@ var partitions = []Partition{ DefaultConfig: PartitionConfig{ Name: "aws-iso-f", DnsSuffix: "csp.hci.ic.gov", - DualStackDnsSuffix: "csp.hci.ic.gov", + DualStackDnsSuffix: "api.aws.hci.ic.gov", SupportsFIPS: true, SupportsDualStack: false, ImplicitGlobalRegion: "us-isof-south-1", }, - Regions: map[string]RegionOverrides{}, + Regions: map[string]RegionOverrides{ + "aws-iso-f-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-isof-east-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-isof-south-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + }, + }, + { + ID: "aws-us-gov", + RegionRegex: "^us\\-gov\\-\\w+\\-\\d+$", + DefaultConfig: PartitionConfig{ + Name: "aws-us-gov", + DnsSuffix: "amazonaws.com", + DualStackDnsSuffix: "api.aws", + SupportsFIPS: true, + SupportsDualStack: true, + ImplicitGlobalRegion: "us-gov-west-1", + }, + Regions: map[string]RegionOverrides{ + "aws-us-gov-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-gov-east-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-gov-west-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + }, }, } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json index 
e19224f1b8..c6582c9c63 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json @@ -17,6 +17,9 @@ "ap-east-1" : { "description" : "Asia Pacific (Hong Kong)" }, + "ap-east-2" : { + "description" : "Asia Pacific (Taipei)" + }, "ap-northeast-1" : { "description" : "Asia Pacific (Tokyo)" }, @@ -47,11 +50,14 @@ "ap-southeast-5" : { "description" : "Asia Pacific (Malaysia)" }, + "ap-southeast-6" : { + "description" : "Asia Pacific (New Zealand)" + }, "ap-southeast-7" : { "description" : "Asia Pacific (Thailand)" }, "aws-global" : { - "description" : "AWS Standard global region" + "description" : "aws global region" }, "ca-central-1" : { "description" : "Canada (Central)" @@ -124,7 +130,7 @@ "regionRegex" : "^cn\\-\\w+\\-\\d+$", "regions" : { "aws-cn-global" : { - "description" : "AWS China global region" + "description" : "aws-cn global region" }, "cn-north-1" : { "description" : "China (Beijing)" @@ -134,32 +140,26 @@ } } }, { - "id" : "aws-us-gov", + "id" : "aws-eusc", "outputs" : { - "dnsSuffix" : "amazonaws.com", - "dualStackDnsSuffix" : "api.aws", - "implicitGlobalRegion" : "us-gov-west-1", - "name" : "aws-us-gov", + "dnsSuffix" : "amazonaws.eu", + "dualStackDnsSuffix" : "api.amazonwebservices.eu", + "implicitGlobalRegion" : "eusc-de-east-1", + "name" : "aws-eusc", "supportsDualStack" : true, "supportsFIPS" : true }, - "regionRegex" : "^us\\-gov\\-\\w+\\-\\d+$", + "regionRegex" : "^eusc\\-(de)\\-\\w+\\-\\d+$", "regions" : { - "aws-us-gov-global" : { - "description" : "AWS GovCloud (US) global region" - }, - "us-gov-east-1" : { - "description" : "AWS GovCloud (US-East)" - }, - "us-gov-west-1" : { - "description" : "AWS GovCloud (US-West)" + "eusc-de-east-1" : { + "description" : "EU (Germany)" } } }, { "id" : "aws-iso", "outputs" : { "dnsSuffix" : "c2s.ic.gov", - "dualStackDnsSuffix" : "c2s.ic.gov", + "dualStackDnsSuffix" : "api.aws.ic.gov", "implicitGlobalRegion" : "us-iso-east-1", "name" : "aws-iso", "supportsDualStack" : false, @@ -168,7 +168,7 @@ "regionRegex" : "^us\\-iso\\-\\w+\\-\\d+$", "regions" : { "aws-iso-global" : { - "description" : "AWS ISO (US) global region" + "description" : "aws-iso global region" }, "us-iso-east-1" : { "description" : "US ISO East" @@ -181,7 +181,7 @@ "id" : "aws-iso-b", "outputs" : { "dnsSuffix" : "sc2s.sgov.gov", - "dualStackDnsSuffix" : "sc2s.sgov.gov", + "dualStackDnsSuffix" : "api.aws.scloud", "implicitGlobalRegion" : "us-isob-east-1", "name" : "aws-iso-b", "supportsDualStack" : false, @@ -190,7 +190,7 @@ "regionRegex" : "^us\\-isob\\-\\w+\\-\\d+$", "regions" : { "aws-iso-b-global" : { - "description" : "AWS ISOB (US) global region" + "description" : "aws-iso-b global region" }, "us-isob-east-1" : { "description" : "US ISOB East (Ohio)" @@ -200,7 +200,7 @@ "id" : "aws-iso-e", "outputs" : { "dnsSuffix" : "cloud.adc-e.uk", - "dualStackDnsSuffix" : "cloud.adc-e.uk", + "dualStackDnsSuffix" : "api.cloud-aws.adc-e.uk", "implicitGlobalRegion" : "eu-isoe-west-1", "name" : "aws-iso-e", "supportsDualStack" : false, @@ -208,6 +208,9 @@ }, "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$", "regions" : { + "aws-iso-e-global" : { + "description" : "aws-iso-e global region" + }, "eu-isoe-west-1" : { "description" : "EU ISOE West" } @@ -216,7 +219,7 @@ "id" : "aws-iso-f", "outputs" : { "dnsSuffix" : "csp.hci.ic.gov", - "dualStackDnsSuffix" : "csp.hci.ic.gov", + "dualStackDnsSuffix" : "api.aws.hci.ic.gov", "implicitGlobalRegion" : "us-isof-south-1", 
"name" : "aws-iso-f", "supportsDualStack" : false, @@ -225,7 +228,7 @@ "regionRegex" : "^us\\-isof\\-\\w+\\-\\d+$", "regions" : { "aws-iso-f-global" : { - "description" : "AWS ISOF global region" + "description" : "aws-iso-f global region" }, "us-isof-east-1" : { "description" : "US ISOF EAST" @@ -234,6 +237,28 @@ "description" : "US ISOF SOUTH" } } + }, { + "id" : "aws-us-gov", + "outputs" : { + "dnsSuffix" : "amazonaws.com", + "dualStackDnsSuffix" : "api.aws", + "implicitGlobalRegion" : "us-gov-west-1", + "name" : "aws-us-gov", + "supportsDualStack" : true, + "supportsFIPS" : true + }, + "regionRegex" : "^us\\-gov\\-\\w+\\-\\d+$", + "regions" : { + "aws-us-gov-global" : { + "description" : "aws-us-gov global region" + }, + "us-gov-east-1" : { + "description" : "AWS GovCloud (US-East)" + }, + "us-gov-west-1" : { + "description" : "AWS GovCloud (US-West)" + } + } } ], "version" : "1.1" } \ No newline at end of file diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md index 83e5bd28a7..7ccb390338 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -1,3 +1,46 @@ +# v2.7.6 (2025-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.7.5 (2025-08-27) + +* **Dependency Update**: Update to smithy-go v1.23.0. +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.7.4 (2025-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.7.3 (2025-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.7.2 (2025-08-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.7.1 (2025-07-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.7.0 (2025-07-28) + +* **Feature**: Add support for HTTP interceptors. +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.37 (2025-07-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.36 (2025-06-17) + +* **Dependency Update**: Update to smithy-go v1.22.4. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.35 (2025-06-10) + +* **Dependency Update**: Updated to the latest SDK module versions + # v2.6.34 (2025-02-27) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go index 735dba7ac7..2d36cac95a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -3,4 +3,4 @@ package endpoints // goModuleVersion is the tagged release for this module -const goModuleVersion = "2.6.34" +const goModuleVersion = "2.7.6" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/CHANGELOG.md new file mode 100644 index 0000000000..815e1331b7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/CHANGELOG.md @@ -0,0 +1,818 @@ +# v1.50.1 (2025-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.50.0 (2025-08-28) + +* **Feature**: Remove incorrect endpoint tests + +# v1.49.2 (2025-08-27) + +* **Dependency Update**: Update to smithy-go v1.23.0. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.49.1 (2025-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.49.0 (2025-08-20) + +* **Feature**: Remove incorrect endpoint tests +* **Bug Fix**: Remove unused deserialization code. + +# v1.48.0 (2025-08-14) + +* **Feature**: This release 1/ Adds support for throttled keys mode for CloudWatch Contributor Insights, 2/ Adds throttling reasons to exceptions across dataplane APIs. 3/ Explicitly models ThrottlingException as a class in statically typed languages. Refer to the launch day blog post for more details. + +# v1.47.0 (2025-08-11) + +* **Feature**: Add support for configuring per-service Options via callback on global config. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.46.0 (2025-08-04) + +* **Feature**: Support configurable auth scheme preferences in service clients via AWS_AUTH_SCHEME_PREFERENCE in the environment, auth_scheme_preference in the config file, and through in-code settings on LoadDefaultConfig and client constructor methods. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.45.1 (2025-07-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.45.0 (2025-07-28) + +* **Feature**: Add support for HTTP interceptors. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.44.1 (2025-07-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.44.0 (2025-06-30) + +* **Feature**: This change adds support for witnesses in global tables. It also adds a new table status, REPLICATION_NOT_AUTHORIZED. This status will indicate scenarios where global replicas table can't be utilized for data plane operations. + +# v1.43.4 (2025-06-17) + +* **Dependency Update**: Update to smithy-go v1.22.4. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.43.3 (2025-06-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.43.2 (2025-06-06) + +* No change notes available for this release. + +# v1.43.1 (2025-04-28) + +* **Documentation**: Doc only update for GSI descriptions. 
+ +# v1.43.0 (2025-04-24) + +* **Feature**: Add support for ARN-sourced account endpoint generation for TransactWriteItems. This will generate account endpoints for DynamoDB TransactWriteItems requests using ARN-sourced account ID when available. + +# v1.42.4 (2025-04-11) + +* **Documentation**: Doc only update for API descriptions. + +# v1.42.3 (2025-04-10) + +* No change notes available for this release. + +# v1.42.2 (2025-04-09) + +* **Documentation**: Documentation update for secondary indexes and Create_Table. + +# v1.42.1 (2025-04-03) + +* No change notes available for this release. + +# v1.42.0 (2025-03-13) + +* **Feature**: Generate account endpoints for DynamoDB requests using ARN-sourced account ID when available + +# v1.41.1 (2025-03-04.2) + +* **Bug Fix**: Add assurance test for operation order. + +# v1.41.0 (2025-02-27) + +* **Feature**: Track credential providers via User-Agent Feature ids +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.40.2 (2025-02-18) + +* **Bug Fix**: Add missing AccountIDEndpointMode binding to endpoint resolution. +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.40.1 (2025-02-11) + +* No change notes available for this release. + +# v1.40.0 (2025-02-05) + +* **Feature**: Track AccountID endpoint mode in user-agent. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.39.9 (2025-02-04) + +* No change notes available for this release. + +# v1.39.8 (2025-01-31) + +* **Dependency Update**: Switch to code-generated waiter matchers, removing the dependency on go-jmespath. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.39.7 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.39.6 (2025-01-24) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.39.5 (2025-01-17) + +* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop. + +# v1.39.4 (2025-01-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.39.3 (2025-01-14) + +* **Bug Fix**: Fix issue where waiters were not failing on unmatched errors as they should. This may have breaking behavioral changes for users in fringe cases. See [this announcement](https://github.com/aws/aws-sdk-go-v2/discussions/2954) for more information. + +# v1.39.2 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.39.1 (2025-01-08) + +* No change notes available for this release. + +# v1.39.0 (2025-01-07) + +* **Feature**: This release makes Amazon DynamoDB point-in-time-recovery (PITR) to be configurable. You can set PITR recovery period for each table individually to between 1 and 35 days. + +# v1.38.1 (2024-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.38.0 (2024-12-03.2) + +* **Feature**: This change adds support for global tables with multi-Region strong consistency (in preview). The UpdateTable API now supports a new attribute MultiRegionConsistency to set consistency when creating global tables. The DescribeTable output now optionally includes the MultiRegionConsistency attribute. + +# v1.37.2 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.37.1 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. 
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.37.0 (2024-11-13)
+
+* **Feature**: This release includes support for the new WarmThroughput feature for DynamoDB. You can now provide an optional WarmThroughput attribute for CreateTable or UpdateTable APIs to pre-warm your table or global secondary index. You can also use DescribeTable to see the latest WarmThroughput value.
+
+# v1.36.5 (2024-11-07)
+
+* **Bug Fix**: Adds case-insensitive handling of error message fields in service responses
+
+# v1.36.4 (2024-11-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.36.3 (2024-10-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.36.2 (2024-10-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.36.1 (2024-10-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.36.0 (2024-10-04)
+
+* **Feature**: Add support for HTTP client metrics.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.35.4 (2024-10-03)
+
+* No change notes available for this release.
+
+# v1.35.3 (2024-09-27)
+
+* No change notes available for this release.
+
+# v1.35.2 (2024-09-25)
+
+* No change notes available for this release.
+
+# v1.35.1 (2024-09-23)
+
+* No change notes available for this release.
+
+# v1.35.0 (2024-09-20)
+
+* **Feature**: Add tracing and metrics support to service clients.
+* **Feature**: Generate and use AWS-account-based endpoints for DynamoDB requests when the account ID is available. The new endpoint URL pattern will be https://<account-id>.ddb.<region>.amazonaws.com. See the documentation for details: https://docs.aws.amazon.com/sdkref/latest/guide/feature-account-endpoints.html.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.34.10 (2024-09-17)
+
+* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution.
+
+# v1.34.9 (2024-09-09)
+
+* **Documentation**: Doc-only update for DynamoDB. Added information about async behavior for TagResource and UntagResource APIs and updated the description of ResourceInUseException.
+
+# v1.34.8 (2024-09-04)
+
+* No change notes available for this release.
+
+# v1.34.7 (2024-09-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.34.6 (2024-08-22)
+
+* No change notes available for this release.
+
+# v1.34.5 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.34.4 (2024-07-24)
+
+* **Documentation**: DynamoDB doc only update for July
+
+# v1.34.3 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.34.2 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.34.1 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.34.0 (2024-06-26)
+
+* **Feature**: Support list-of-string endpoint parameter.
+
+# v1.33.2 (2024-06-20)
+
+* **Documentation**: Doc-only update for DynamoDB. Fixed Important note in 6 Global table APIs - CreateGlobalTable, DescribeGlobalTable, DescribeGlobalTableSettings, ListGlobalTables, UpdateGlobalTable, and UpdateGlobalTableSettings.
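The v1.37.0 entry above describes pre-warming a table via an optional WarmThroughput attribute on CreateTable or UpdateTable. A hedged sketch of what that could look like, assuming the warm throughput shape is a pair of per-second unit fields (`ReadUnitsPerSecond`, `WriteUnitsPerSecond`); the table name, key schema, and unit values are illustrative only:

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

// createWarmTable creates a hypothetical on-demand table pre-warmed to an
// assumed traffic level, per the v1.37.0 WarmThroughput feature note.
func createWarmTable(ctx context.Context, client *dynamodb.Client) error {
	_, err := client.CreateTable(ctx, &dynamodb.CreateTableInput{
		TableName:   aws.String("example-table"), // hypothetical
		BillingMode: types.BillingModePayPerRequest,
		AttributeDefinitions: []types.AttributeDefinition{
			{AttributeName: aws.String("pk"), AttributeType: types.ScalarAttributeTypeS},
		},
		KeySchema: []types.KeySchemaElement{
			{AttributeName: aws.String("pk"), KeyType: types.KeyTypeHash},
		},
		// Assumed field names; check the WarmThroughput API reference.
		WarmThroughput: &types.WarmThroughput{
			ReadUnitsPerSecond:  aws.Int64(12000),
			WriteUnitsPerSecond: aws.Int64(4000),
		},
	})
	return err
}
```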
+ +# v1.33.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.33.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.9 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.8 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.7 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.6 (2024-05-28) + +* **Documentation**: Doc-only update for DynamoDB. Specified the IAM actions needed to authorize a user to create a table with a resource-based policy. + +# v1.32.5 (2024-05-24) + +* **Documentation**: Documentation only updates for DynamoDB. + +# v1.32.4 (2024-05-23) + +* No change notes available for this release. + +# v1.32.3 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.2 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.1 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + +# v1.32.0 (2024-05-02) + +* **Feature**: This release adds support to specify an optional, maximum OnDemandThroughput for DynamoDB tables and global secondary indexes in the CreateTable or UpdateTable APIs. You can also override the OnDemandThroughput settings by calling the ImportTable, RestoreFromPointInTime, or RestoreFromBackup APIs. + +# v1.31.1 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.31.0 (2024-03-20) + +* **Feature**: This release introduces 3 new APIs ('GetResourcePolicy', 'PutResourcePolicy' and 'DeleteResourcePolicy') and modifies the existing 'CreateTable' API for the resource-based policy support. It also modifies several APIs to accept a 'TableArn' for the 'TableName' parameter. + +# v1.30.5 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.4 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.3 (2024-03-06) + +* **Documentation**: Doc only updates for DynamoDB documentation + +# v1.30.2 (2024-03-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.1 (2024-02-23) + +* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.0 (2024-02-22) + +* **Feature**: Add middleware stack snapshot tests. + +# v1.29.2 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.1 (2024-02-20) + +* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure. +* **Documentation**: Publishing quick fix for doc only update. + +# v1.29.0 (2024-02-16) + +* **Feature**: Add new ClientOptions field to waiter config which allows you to extend the config for operation calls made by waiters. 
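The v1.29.0 entry above adds a ClientOptions field to waiter config so the operation calls a waiter makes can be tuned independently of the parent client. A minimal sketch against the TableExists waiter; the table name and retry limit are placeholders:

```go
package example

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
)

// waitForTable blocks until a hypothetical table exists, tightening retries
// only for the DescribeTable calls issued by the waiter (v1.29.0).
func waitForTable(ctx context.Context, client *dynamodb.Client) error {
	waiter := dynamodb.NewTableExistsWaiter(client, func(o *dynamodb.TableExistsWaiterOptions) {
		o.ClientOptions = append(o.ClientOptions, func(o *dynamodb.Options) {
			o.RetryMaxAttempts = 2
		})
	})
	return waiter.Wait(ctx,
		&dynamodb.DescribeTableInput{TableName: aws.String("example-table")},
		5*time.Minute)
}
```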
+ +# v1.28.1 (2024-02-15) + +* **Bug Fix**: Correct failure to determine the error type in awsJson services that could occur when errors were modeled with a non-string `code` field. + +# v1.28.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.1 (2024-02-02) + +* **Documentation**: Any number of users can execute up to 50 concurrent restores (any type of restore) in a given account. + +# v1.27.0 (2024-01-19) + +* **Feature**: This release adds support for including ApproximateCreationDateTimePrecision configurations in EnableKinesisStreamingDestination API, adds the same as an optional field in the response of DescribeKinesisStreamingDestination, and adds support for a new UpdateKinesisStreamingDestination API. + +# v1.26.9 (2024-01-17) + +* **Documentation**: Updating note for enabling streams for UpdateTable. + +# v1.26.8 (2024-01-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.7 (2023-12-20) + +* No change notes available for this release. + +# v1.26.6 (2023-12-08) + +* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein. + +# v1.26.5 (2023-12-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.4 (2023-12-06) + +* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously. + +# v1.26.3 (2023-12-01) + +* **Bug Fix**: Correct wrapping of errors in authentication workflow. +* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.2 (2023-11-30.2) + +* **Bug Fix**: Respect caller region overrides in endpoint discovery. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.1 (2023-11-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.0 (2023-11-29) + +* **Feature**: Expose Options() accessor on service clients. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.5 (2023-11-28.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.4 (2023-11-28) + +* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction. + +# v1.25.3 (2023-11-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.2 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.1 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.0 (2023-10-18) + +* **Feature**: Add handwritten paginators that were present in some services in the v1 SDK. +* **Documentation**: Updating descriptions for several APIs. 
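The v1.23.0 entry above concerns generated paginators. A short sketch of the paginator pattern these clients expose, which replaces hand-rolled ExclusiveStartKey loops; the table name is a placeholder:

```go
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
)

// countItems scans a hypothetical table page by page using the generated
// ScanPaginator instead of managing continuation tokens manually.
func countItems(ctx context.Context, client *dynamodb.Client) (int, error) {
	p := dynamodb.NewScanPaginator(client, &dynamodb.ScanInput{
		TableName: aws.String("example-table"),
	})
	total := 0
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return 0, err
		}
		total += len(page.Items)
	}
	fmt.Println("scanned items:", total)
	return total, nil
}
```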
+ +# v1.22.2 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.1 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.0 (2023-09-26) + +* **Feature**: Amazon DynamoDB now supports Incremental Export as an enhancement to the existing Export Table + +# v1.21.5 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.4 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.3 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.2 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.1 (2023-08-01) + +* No change notes available for this release. + +# v1.21.0 (2023-07-31) + +* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.3 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.2 (2023-07-25) + +* **Documentation**: Documentation updates for DynamoDB + +# v1.20.1 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.0 (2023-06-29) + +* **Feature**: This release adds ReturnValuesOnConditionCheckFailure parameter to PutItem, UpdateItem, DeleteItem, ExecuteStatement, BatchExecuteStatement and ExecuteTransaction APIs. When set to ALL_OLD, API returns a copy of the item as it was when a conditional write failed + +# v1.19.11 (2023-06-21) + +* **Documentation**: Documentation updates for DynamoDB + +# v1.19.10 (2023-06-15) + +* No change notes available for this release. + +# v1.19.9 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.8 (2023-06-12) + +* **Documentation**: Documentation updates for DynamoDB + +# v1.19.7 (2023-05-04) + +* No change notes available for this release. + +# v1.19.6 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.5 (2023-04-17) + +* **Documentation**: Documentation updates for DynamoDB API + +# v1.19.4 (2023-04-10) + +* No change notes available for this release. + +# v1.19.3 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.2 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.1 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.0 (2023-03-08) + +* **Feature**: Adds deletion protection support to DynamoDB tables. Tables with deletion protection enabled cannot be deleted. Deletion protection is disabled by default, can be enabled via the CreateTable or UpdateTable APIs, and is visible in TableDescription. This setting is not replicated for Global Tables. + +# v1.18.6 (2023-03-03) + +* **Documentation**: Documentation updates for DynamoDB. + +# v1.18.5 (2023-02-22) + +* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. 
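The v1.21.0 entry above introduces rules-based endpoint resolution, with BaseEndpoint as the simple override that supersedes the deprecated EndpointResolver. A minimal sketch pointing the client at a local emulator; the localhost address is an assumed DynamoDB Local endpoint:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	// BaseEndpoint (v1.21.0+) is the preferred way to do a plain endpoint
	// override, e.g. for DynamoDB Local in tests.
	client := dynamodb.NewFromConfig(cfg, func(o *dynamodb.Options) {
		o.BaseEndpoint = aws.String("http://localhost:8000") // assumed local endpoint
	})
	_ = client
}
```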
+ +# v1.18.4 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.3 (2023-02-15) + +* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. +* **Bug Fix**: Correct error type parsing for restJson services. + +# v1.18.2 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.1 (2023-01-23) + +* No change notes available for this release. + +# v1.18.0 (2023-01-05) + +* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + +# v1.17.9 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.8 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.7 (2022-11-22) + +* No change notes available for this release. + +# v1.17.6 (2022-11-18) + +* **Documentation**: Updated minor fixes for DynamoDB documentation. + +# v1.17.5 (2022-11-16) + +* No change notes available for this release. + +# v1.17.4 (2022-11-10) + +* No change notes available for this release. + +# v1.17.3 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.2 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.1 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.0 (2022-09-15) + +* **Feature**: Increased DynamoDB transaction limit from 25 to 100. + +# v1.16.5 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.4 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.3 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.2 (2022-08-30) + +* No change notes available for this release. 
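Several entries above (ErrorCodeOverride in v1.18.0, the error-code retrieval fixes in v1.18.3 and v1.18.5) concern how service errors are surfaced. A small sketch of generic error inspection through the smithy.APIError interface, which is the mechanism those fixes target; the table and key are hypothetical:

```go
package example

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
	"github.com/aws/smithy-go"
)

// deleteItem inspects any API failure generically via smithy.APIError,
// logging the code, fault classification, and message.
func deleteItem(ctx context.Context, client *dynamodb.Client) {
	_, err := client.DeleteItem(ctx, &dynamodb.DeleteItemInput{
		TableName: aws.String("example-table"), // hypothetical
		Key: map[string]types.AttributeValue{
			"pk": &types.AttributeValueMemberS{Value: "id-123"}, // hypothetical
		},
	})
	var apiErr smithy.APIError
	if errors.As(err, &apiErr) {
		log.Printf("code=%s fault=%s msg=%s",
			apiErr.ErrorCode(), apiErr.ErrorFault(), apiErr.ErrorMessage())
	}
}
```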
+
+# v1.16.1 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.0 (2022-08-18)
+
+* **Feature**: This release adds support for importing data from S3 into a new DynamoDB table
+
+# v1.15.13 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.12 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.11 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.10 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.9 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.8 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.7 (2022-06-17)
+
+* **Documentation**: Doc only update for DynamoDB service
+
+# v1.15.6 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.5 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.0 (2022-02-24)
+
+* **Feature**: API client updated
+* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttle responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details and configuration options.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2021-12-21)
+
+* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens.
+* **Feature**: Updated to latest service endpoints
+
+# v1.10.0 (2021-12-02)
+
+* **Feature**: API client updated
+* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514))
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2021-11-30)
+
+* **Feature**: API client updated
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.1 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2021-11-12)
+
+* **Feature**: Service clients now support custom endpoints that have an initial URI path defined.
+* **Feature**: Waiters now have a `WaitForOutput` method, which can be used to retrieve the output of the successful wait operation. Thank you to [Andrew Haines](https://github.com/haines) for contributing this feature. +* **Documentation**: Updated service to latest API model. + +# v1.7.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-10-21) + +* **Feature**: API client updated +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.2 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-06-25) + +* **Feature**: Adds support for endpoint discovery. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/go-jose/go-jose/v4/LICENSE b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/LICENSE.txt similarity index 100% rename from vendor/github.com/go-jose/go-jose/v4/LICENSE rename to vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/LICENSE.txt diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_client.go new file mode 100644 index 0000000000..487c8200a6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_client.go @@ -0,0 +1,1175 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package dynamodb + +import ( + "context" + cryptorand "crypto/rand" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" + ddbcust "github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/customizations" + acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" + smithydocument "github.com/aws/smithy-go/document" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" + "github.com/aws/smithy-go/middleware" + smithyrand "github.com/aws/smithy-go/rand" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net" + "net/http" + "net/url" + "strings" + "sync/atomic" + "time" +) + +const ServiceID = "DynamoDB" +const ServiceAPIVersion = "2012-08-10" + +type operationMetrics struct { + Duration metrics.Float64Histogram + SerializeDuration metrics.Float64Histogram + ResolveIdentityDuration metrics.Float64Histogram + ResolveEndpointDuration metrics.Float64Histogram + SignRequestDuration metrics.Float64Histogram + DeserializeDuration metrics.Float64Histogram +} + +func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram { + switch name { + case "client.call.duration": + return m.Duration + case "client.call.serialization_duration": + return m.SerializeDuration + case "client.call.resolve_identity_duration": + return m.ResolveIdentityDuration + case "client.call.resolve_endpoint_duration": + return m.ResolveEndpointDuration + case "client.call.signing_duration": + return m.SignRequestDuration + case "client.call.deserialization_duration": + return m.DeserializeDuration + default: + panic("unrecognized operation metric") + } +} + +func timeOperationMetric[T any]( + ctx context.Context, metric string, fn func() (T, error), + opts ...metrics.RecordMetricOption, +) (T, error) { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + start := time.Now() + v, err := fn() + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) + return v, err +} + +func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + var ended bool + start := time.Now() + return func() { + if ended { + return + } + ended = true + + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) 
+ } +} + +func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { + return func(o *metrics.RecordMetricOptions) { + o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) + o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) + } +} + +type operationMetricsKey struct{} + +func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) { + meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/dynamodb") + om := &operationMetrics{} + + var err error + + om.Duration, err = operationMetricTimer(meter, "client.call.duration", + "Overall call duration (including retries and time to send or receive request and response body)") + if err != nil { + return nil, err + } + om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration", + "The time it takes to serialize a message body") + if err != nil { + return nil, err + } + om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration", + "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider") + if err != nil { + return nil, err + } + om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration", + "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request") + if err != nil { + return nil, err + } + om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration", + "The time it takes to sign a request") + if err != nil { + return nil, err + } + om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration", + "The time it takes to deserialize a message body") + if err != nil { + return nil, err + } + + return context.WithValue(parent, operationMetricsKey{}, om), nil +} + +func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) { + return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = desc + }) +} + +func getOperationMetrics(ctx context.Context) *operationMetrics { + return ctx.Value(operationMetricsKey{}).(*operationMetrics) +} + +func operationTracer(p tracing.TracerProvider) tracing.Tracer { + return p.Tracer("github.com/aws/aws-sdk-go-v2/service/dynamodb") +} + +// Client provides the API client to make operations call for Amazon DynamoDB. +type Client struct { + options Options + + // cache used to store discovered endpoints + endpointCache *internalEndpointDiscovery.EndpointCache + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. 
+func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + + setResolvedDefaultsMode(&options) + + resolveRetryer(&options) + + resolveHTTPClient(&options) + + resolveHTTPSignerV4(&options) + + resolveIdempotencyTokenProvider(&options) + + resolveEnableEndpointDiscovery(&options) + + resolveEndpointResolverV2(&options) + + resolveTracerProvider(&options) + + resolveMeterProvider(&options) + + resolveAuthSchemeResolver(&options) + + for _, fn := range optFns { + fn(&options) + } + + finalizeRetryMaxAttempts(&options) + + ignoreAnonymousAuth(&options) + + wrapWithAnonymousAuth(&options) + + resolveAuthSchemes(&options) + + client := &Client{ + options: options, + } + + resolveEndpointCache(client) + + initializeTimeOffsetResolver(client) + + return client +} + +// Options returns a copy of the client configuration. +// +// Callers SHOULD NOT perform mutations on any inner structures within client +// config. Config overrides should instead be made on a per-operation basis through +// functional options. +func (c *Client) Options() Options { + return c.options.Copy() +} + +func (c *Client) invokeOperation( + ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error, +) ( + result interface{}, metadata middleware.Metadata, err error, +) { + ctx = middleware.ClearStackValues(ctx) + ctx = middleware.WithServiceID(ctx, ServiceID) + ctx = middleware.WithOperationName(ctx, opID) + + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + + for _, fn := range optFns { + fn(&options) + } + + finalizeOperationRetryMaxAttempts(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + ctx, err = withOperationMetrics(ctx, options.MeterProvider) + if err != nil { + return nil, metadata, err + } + + tracer := operationTracer(options.TracerProvider) + spanName := fmt.Sprintf("%s.%s", ServiceID, opID) + + ctx = tracing.WithOperationTracer(ctx, tracer) + + ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) { + o.Kind = tracing.SpanKindClient + o.Properties.Set("rpc.system", "aws-api") + o.Properties.Set("rpc.method", opID) + o.Properties.Set("rpc.service", ServiceID) + }) + endTimer := startMetricTimer(ctx, "client.call.duration") + defer endTimer() + defer span.End() + + handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) { + o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/dynamodb") + }) + decorated := middleware.DecorateHandler(handler, stack) + result, metadata, err = decorated.Handle(ctx, params) + if err != nil { + span.SetProperty("exception.type", fmt.Sprintf("%T", err)) + span.SetProperty("exception.message", err.Error()) + + var aerr smithy.APIError + if errors.As(err, &aerr) { + span.SetProperty("api.error_code", aerr.ErrorCode()) + span.SetProperty("api.error_message", aerr.ErrorMessage()) + span.SetProperty("api.error_fault", aerr.ErrorFault().String()) + } + + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + + span.SetProperty("error", err != nil) + if err == nil { + span.SetStatus(tracing.SpanStatusOK) + } else { + 
span.SetStatus(tracing.SpanStatusError) + } + + return result, metadata, err +} + +type operationInputKey struct{} + +func setOperationInput(ctx context.Context, input interface{}) context.Context { + return middleware.WithStackValue(ctx, operationInputKey{}, input) +} + +func getOperationInput(ctx context.Context) interface{} { + return middleware.GetStackValue(ctx, operationInputKey{}) +} + +type setOperationInputMiddleware struct { +} + +func (*setOperationInputMiddleware) ID() string { + return "setOperationInput" +} + +func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + ctx = setOperationInput(ctx, in.Parameters) + return next.HandleSerialize(ctx, in) +} + +func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { + if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { + return fmt.Errorf("add ResolveAuthScheme: %w", err) + } + if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { + return fmt.Errorf("add GetIdentity: %v", err) + } + if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { + return fmt.Errorf("add ResolveEndpointV2: %v", err) + } + if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil { + return fmt.Errorf("add Signing: %w", err) + } + return nil +} +func resolveAuthSchemeResolver(options *Options) { + if options.AuthSchemeResolver == nil { + options.AuthSchemeResolver = &defaultAuthSchemeResolver{} + } +} + +func resolveAuthSchemes(options *Options) { + if options.AuthSchemes == nil { + options.AuthSchemes = []smithyhttp.AuthScheme{ + internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{ + Signer: options.HTTPSignerV4, + Logger: options.Logger, + LogSigning: options.ClientLogMode.IsSigning(), + }), + } + } +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +type legacyEndpointContextSetter struct { + LegacyResolver EndpointResolver +} + +func (*legacyEndpointContextSetter) ID() string { + return "legacyEndpointContextSetter" +} + +func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.LegacyResolver != nil { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true) + } + + return next.HandleInitialize(ctx, in) + +} +func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error { + return stack.Initialize.Add(&legacyEndpointContextSetter{ + LegacyResolver: o.EndpointResolver, + }, middleware.Before) +} + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + 
o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + AccountIDEndpointMode: cfg.AccountIDEndpointMode, + AuthSchemePreference: cfg.AuthSchemePreference, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveInterceptors(cfg, &opts) + resolveEnableEndpointDiscoveryFromConfigSources(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + resolveBaseEndpoint(cfg, &opts) + return New(opts, func(o *Options) { + for _, opt := range cfg.ServiceOptions { + opt(ServiceID, o) + } + for _, opt := range optFns { + opt(o) + } + }) +} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) 
+ } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttempts(o *Options) { + if o.RetryMaxAttempts == 0 { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func finalizeOperationRetryMaxAttempts(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) +} + +func resolveInterceptors(cfg aws.Config, o *Options) { + o.Interceptors = cfg.Interceptors.Copy() +} + +func addClientUserAgent(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "dynamodb", goModuleVersion) + if len(options.AppID) > 0 { + ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID) + } + + return nil +} + +func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) { + id := (*awsmiddleware.RequestUserAgent)(nil).ID() + mw, ok := stack.Build.Get(id) + if !ok { + mw = awsmiddleware.NewRequestUserAgent() + if err := stack.Build.Add(mw, middleware.After); err != nil { + return nil, err + } + } + + ua, ok := mw.(*awsmiddleware.RequestUserAgent) + if !ok { + return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) + } + + return ua, nil +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addClientRequestID(stack *middleware.Stack) error { + return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After) +} + +func addComputeContentLength(stack *middleware.Stack) error { + return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After) +} + +func addRawResponseToMetadata(stack *middleware.Stack) error { + return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before) +} + +func addRecordResponseTiming(stack *middleware.Stack) error { + return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) +} + +func addSpanRetryLoop(stack *middleware.Stack, options Options) error { + return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before) +} + +type spanRetryLoop struct { + options Options +} + +func (*spanRetryLoop) ID() string { + return "spanRetryLoop" +} + +func (m *spanRetryLoop) HandleFinalize( + ctx context.Context, in 
middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + middleware.FinalizeOutput, middleware.Metadata, error, +) { + tracer := operationTracer(m.options.TracerProvider) + ctx, span := tracer.StartSpan(ctx, "RetryLoop") + defer span.End() + + return next.HandleFinalize(ctx, in) +} +func addStreamingEventsPayload(stack *middleware.Stack) error { + return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) +} + +func addUnsignedPayload(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After) +} + +func addComputePayloadSHA256(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) +} + +func addContentSHA256Header(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) +} + +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + +func resolveIdempotencyTokenProvider(o *Options) { + if o.IdempotencyTokenProvider != nil { + return + } + o.IdempotencyTokenProvider = smithyrand.NewUUIDIdempotencyToken(cryptorand.Reader) +} + +func addRetry(stack *middleware.Stack, o Options) error { + attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { + m.LogAttempts = o.ClientLogMode.IsRetries() + m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/dynamodb") + }) + if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil { + return err + } + if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { + return err + } + return nil +} + +// resolves EnableEndpointDiscovery configuration +func resolveEnableEndpointDiscoveryFromConfigSources(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveEnableEndpointDiscovery(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointDiscovery.EnableEndpointDiscovery = value + } + return nil +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} + +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +// resolves endpoint cache on client +func resolveEndpointCache(c *Client) { + 
c.endpointCache = internalEndpointDiscovery.NewEndpointCache(10) +} + +// EndpointDiscoveryOptions used to configure endpoint discovery +type EndpointDiscoveryOptions struct { + // Enables endpoint discovery + EnableEndpointDiscovery aws.EndpointDiscoveryEnableState +} + +func resolveEnableEndpointDiscovery(o *Options) { + if o.EndpointDiscovery.EnableEndpointDiscovery != aws.EndpointDiscoveryUnset { + return + } + o.EndpointDiscovery.EnableEndpointDiscovery = aws.EndpointDiscoveryAuto +} + +func (c *Client) handleEndpointDiscoveryFromService(ctx context.Context, input *DescribeEndpointsInput, region, key string, opt internalEndpointDiscovery.DiscoverEndpointOptions) (internalEndpointDiscovery.Endpoint, error) { + output, err := c.DescribeEndpoints(ctx, input, func(o *Options) { + o.Region = region + + o.EndpointOptions.DisableHTTPS = opt.DisableHTTPS + o.Logger = opt.Logger + }) + if err != nil { + return internalEndpointDiscovery.Endpoint{}, err + } + + endpoint := internalEndpointDiscovery.Endpoint{} + endpoint.Key = key + + for _, e := range output.Endpoints { + if e.Address == nil { + continue + } + address := *e.Address + + var scheme string + if idx := strings.Index(address, "://"); idx != -1 { + scheme = address[:idx] + } + if len(scheme) == 0 { + scheme = "https" + if opt.DisableHTTPS { + scheme = "http" + } + address = fmt.Sprintf("%s://%s", scheme, address) + } + + cachedInMinutes := e.CachePeriodInMinutes + u, err := url.Parse(address) + if err != nil { + continue + } + + addr := internalEndpointDiscovery.WeightedAddress{ + URL: u, + Expired: time.Now().Add(time.Duration(cachedInMinutes) * time.Minute).Round(0), + } + endpoint.Add(addr) + } + + c.endpointCache.Add(endpoint) + return endpoint, nil +} + +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func checkAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) error { + switch mode { + case aws.AccountIDEndpointModeUnset: + case aws.AccountIDEndpointModePreferred: + case aws.AccountIDEndpointModeDisabled: + case aws.AccountIDEndpointModeRequired: + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); !ok { + return fmt.Errorf("accountID is required but not set") + } else if ca.Credentials.AccountID == "" { + return fmt.Errorf("accountID is required but not set") + } + // default check in case invalid mode is configured through request config + default: + return fmt.Errorf("invalid accountID endpoint mode %s, must be preferred/required/disabled", mode) + } + + return nil +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + 
ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + +func addUserAgentAccountIDEndpointMode(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.AccountIDEndpointMode { + case aws.AccountIDEndpointModePreferred: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureAccountIDModePreferred) + case aws.AccountIDEndpointModeRequired: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureAccountIDModeRequired) + case aws.AccountIDEndpointModeDisabled: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureAccountIDModeDisabled) + } + return nil +} + +type setCredentialSourceMiddleware struct { + ua *awsmiddleware.RequestUserAgent + options Options +} + +func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" } + +func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource) + if !ok { + return next.HandleBuild(ctx, in) + } + providerSources := asProviderSource.ProviderSources() + for _, source := range providerSources { + m.ua.AddCredentialsSource(source) + } + return next.HandleBuild(ctx, in) +} + +func addCredentialSource(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + mw := setCredentialSourceMiddleware{ua: ua, options: options} + return stack.Build.Insert(&mw, "UserAgent", middleware.Before) +} + +func resolveTracerProvider(options *Options) { + if options.TracerProvider == nil { + options.TracerProvider = &tracing.NopTracerProvider{} + } +} + +func resolveMeterProvider(options *Options) { + if options.MeterProvider == nil { + options.MeterProvider = metrics.NopMeterProvider{} + } +} + +// IdempotencyTokenProvider interface for providing idempotency token +type IdempotencyTokenProvider interface { + GetIdempotencyToken() (string, error) +} + +func addRecursionDetection(stack *middleware.Stack) error { + return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before) + +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before) + +} + +func addValidateResponseChecksum(stack *middleware.Stack, options Options) error { + return ddbcust.AddValidateResponseChecksum(stack, ddbcust.AddValidateResponseChecksumOptions{Disable: options.DisableValidateResponseChecksum}) +} + +func addAcceptEncodingGzip(stack *middleware.Stack, options Options) error { + return acceptencodingcust.AddAcceptEncodingGzip(stack, acceptencodingcust.AddAcceptEncodingGzipOptions{Enable: options.EnableAcceptEncodingGzip}) +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} + +type disableHTTPSMiddleware struct 
{ + DisableHTTPS bool +} + +func (*disableHTTPSMiddleware) ID() string { + return "disableHTTPS" +} + +func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { + req.URL.Scheme = "http" + } + + return next.HandleFinalize(ctx, in) +} + +func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { + return stack.Finalize.Insert(&disableHTTPSMiddleware{ + DisableHTTPS: o.EndpointOptions.DisableHTTPS, + }, "ResolveEndpointV2", middleware.After) +} + +func addInterceptBeforeRetryLoop(stack *middleware.Stack, opts Options) error { + return stack.Finalize.Insert(&smithyhttp.InterceptBeforeRetryLoop{ + Interceptors: opts.Interceptors.BeforeRetryLoop, + }, "Retry", middleware.Before) +} + +func addInterceptAttempt(stack *middleware.Stack, opts Options) error { + return stack.Finalize.Insert(&smithyhttp.InterceptAttempt{ + BeforeAttempt: opts.Interceptors.BeforeAttempt, + AfterAttempt: opts.Interceptors.AfterAttempt, + }, "Retry", middleware.After) +} + +func addInterceptExecution(stack *middleware.Stack, opts Options) error { + return stack.Initialize.Add(&smithyhttp.InterceptExecution{ + BeforeExecution: opts.Interceptors.BeforeExecution, + AfterExecution: opts.Interceptors.AfterExecution, + }, middleware.Before) +} + +func addInterceptBeforeSerialization(stack *middleware.Stack, opts Options) error { + return stack.Serialize.Insert(&smithyhttp.InterceptBeforeSerialization{ + Interceptors: opts.Interceptors.BeforeSerialization, + }, "OperationSerializer", middleware.Before) +} + +func addInterceptAfterSerialization(stack *middleware.Stack, opts Options) error { + return stack.Serialize.Insert(&smithyhttp.InterceptAfterSerialization{ + Interceptors: opts.Interceptors.AfterSerialization, + }, "OperationSerializer", middleware.After) +} + +func addInterceptBeforeSigning(stack *middleware.Stack, opts Options) error { + return stack.Finalize.Insert(&smithyhttp.InterceptBeforeSigning{ + Interceptors: opts.Interceptors.BeforeSigning, + }, "Signing", middleware.Before) +} + +func addInterceptAfterSigning(stack *middleware.Stack, opts Options) error { + return stack.Finalize.Insert(&smithyhttp.InterceptAfterSigning{ + Interceptors: opts.Interceptors.AfterSigning, + }, "Signing", middleware.After) +} + +func addInterceptTransmit(stack *middleware.Stack, opts Options) error { + return stack.Deserialize.Add(&smithyhttp.InterceptTransmit{ + BeforeTransmit: opts.Interceptors.BeforeTransmit, + AfterTransmit: opts.Interceptors.AfterTransmit, + }, middleware.After) +} + +func addInterceptBeforeDeserialization(stack *middleware.Stack, opts Options) error { + return stack.Deserialize.Insert(&smithyhttp.InterceptBeforeDeserialization{ + Interceptors: opts.Interceptors.BeforeDeserialization, + }, "OperationDeserializer", middleware.After) // (deserialize stack is called in reverse) +} + +func addInterceptAfterDeserialization(stack *middleware.Stack, opts Options) error { + return stack.Deserialize.Insert(&smithyhttp.InterceptAfterDeserialization{ + Interceptors: opts.Interceptors.AfterDeserialization, + }, "OperationDeserializer", middleware.Before) +} + +type spanInitializeStart struct { +} + +func (*spanInitializeStart) ID() string { + return 
"spanInitializeStart" +} + +func (m *spanInitializeStart) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "Initialize") + + return next.HandleInitialize(ctx, in) +} + +type spanInitializeEnd struct { +} + +func (*spanInitializeEnd) ID() string { + return "spanInitializeEnd" +} + +func (m *spanInitializeEnd) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleInitialize(ctx, in) +} + +type spanBuildRequestStart struct { +} + +func (*spanBuildRequestStart) ID() string { + return "spanBuildRequestStart" +} + +func (m *spanBuildRequestStart) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + middleware.SerializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "BuildRequest") + + return next.HandleSerialize(ctx, in) +} + +type spanBuildRequestEnd struct { +} + +func (*spanBuildRequestEnd) ID() string { + return "spanBuildRequestEnd" +} + +func (m *spanBuildRequestEnd) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + middleware.BuildOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleBuild(ctx, in) +} + +func addSpanInitializeStart(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before) +} + +func addSpanInitializeEnd(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After) +} + +func addSpanBuildRequestStart(stack *middleware.Stack) error { + return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before) +} + +func addSpanBuildRequestEnd(stack *middleware.Stack) error { + return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchExecuteStatement.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchExecuteStatement.go new file mode 100644 index 0000000000..0b4d0edaf2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchExecuteStatement.go @@ -0,0 +1,235 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation allows you to perform batch reads or writes on data stored in +// DynamoDB, using PartiQL. Each read statement in a BatchExecuteStatement must +// specify an equality condition on all key attributes. This enforces that each +// SELECT statement in a batch returns at most a single item. For more information, +// see [Running batch operations with PartiQL for DynamoDB]. +// +// The entire batch must consist of either read statements or write statements, +// you cannot mix both in one batch. +// +// A HTTP 200 response does not mean that all statements in the +// BatchExecuteStatement succeeded. Error details for individual statements can be +// found under the [Error]field of the BatchStatementResponse for each statement. 
+// +// [Error]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BatchStatementResponse.html#DDB-Type-BatchStatementResponse-Error +// [Running batch operations with PartiQL for DynamoDB]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ql-reference.multiplestatements.batching.html +func (c *Client) BatchExecuteStatement(ctx context.Context, params *BatchExecuteStatementInput, optFns ...func(*Options)) (*BatchExecuteStatementOutput, error) { + if params == nil { + params = &BatchExecuteStatementInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "BatchExecuteStatement", params, optFns, c.addOperationBatchExecuteStatementMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*BatchExecuteStatementOutput) + out.ResultMetadata = metadata + return out, nil +} + +type BatchExecuteStatementInput struct { + + // The list of PartiQL statements representing the batch to run. + // + // This member is required. + Statements []types.BatchStatementRequest + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response: + // + // - INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // + // - TOTAL - The response includes only the aggregate ConsumedCapacity for the + // operation. + // + // - NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity types.ReturnConsumedCapacity + + noSmithyDocumentSerde +} + +type BatchExecuteStatementOutput struct { + + // The capacity units consumed by the entire operation. The values of the list are + // ordered according to the ordering of the statements. + ConsumedCapacity []types.ConsumedCapacity + + // The response to each PartiQL statement in the batch. The values of the list are + // ordered according to the ordering of the request statements. + Responses []types.BatchStatementResponse + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationBatchExecuteStatementMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpBatchExecuteStatement{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpBatchExecuteStatement{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "BatchExecuteStatement"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpBatchExecuteStatementValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchExecuteStatement(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + 
return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opBatchExecuteStatement(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "BatchExecuteStatement", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchGetItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchGetItem.go new file mode 100644 index 0000000000..5e2394a852 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchGetItem.go @@ -0,0 +1,428 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// The BatchGetItem operation returns the attributes of one or more items from one +// or more tables. You identify requested items by primary key. +// +// A single operation can retrieve up to 16 MB of data, which can contain as many +// as 100 items. BatchGetItem returns a partial result if the response size limit +// is exceeded, the table's provisioned throughput is exceeded, more than 1MB per +// partition is requested, or an internal processing failure occurs. If a partial +// result is returned, the operation returns a value for UnprocessedKeys . You can +// use this value to retry the operation starting with the next item to get. +// +// If you request more than 100 items, BatchGetItem returns a ValidationException +// with the message "Too many items requested for the BatchGetItem call." +// +// For example, if you ask to retrieve 100 items, but each individual item is 300 +// KB in size, the system returns 52 items (so as not to exceed the 16 MB limit). +// It also returns an appropriate UnprocessedKeys value so you can get the next +// page of results. If desired, your application can include its own logic to +// assemble the pages of results into one dataset. +// +// If none of the items can be processed due to insufficient provisioned +// throughput on all of the tables in the request, then BatchGetItem returns a +// ProvisionedThroughputExceededException . If at least one of the items is +// successfully processed, then BatchGetItem completes successfully, while +// returning the keys of the unread items in UnprocessedKeys . +// +// If DynamoDB returns any unprocessed items, you should retry the batch operation +// on those items. However, we strongly recommend that you use an exponential +// backoff algorithm. If you retry the batch operation immediately, the underlying +// read or write requests can still fail due to throttling on the individual +// tables. 
If you delay the batch operation using exponential backoff, the +// individual requests in the batch are much more likely to succeed. +// +// For more information, see [Batch Operations and Error Handling] in the Amazon DynamoDB Developer Guide. +// +// By default, BatchGetItem performs eventually consistent reads on every table in +// the request. If you want strongly consistent reads instead, you can set +// ConsistentRead to true for any or all tables. +// +// In order to minimize response latency, BatchGetItem may retrieve items in +// parallel. +// +// When designing your application, keep in mind that DynamoDB does not return +// items in any particular order. To help parse the response by item, include the +// primary key values for the items in your request in the ProjectionExpression +// parameter. +// +// If a requested item does not exist, it is not returned in the result. Requests +// for nonexistent items consume the minimum read capacity units according to the +// type of read. For more information, see [Working with Tables]in the Amazon DynamoDB Developer Guide. +// +// BatchGetItem will result in a ValidationException if the same key is specified +// multiple times. +// +// [Batch Operations and Error Handling]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#BatchOperations +// [Working with Tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#CapacityUnitCalculations +func (c *Client) BatchGetItem(ctx context.Context, params *BatchGetItemInput, optFns ...func(*Options)) (*BatchGetItemOutput, error) { + if params == nil { + params = &BatchGetItemInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "BatchGetItem", params, optFns, c.addOperationBatchGetItemMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*BatchGetItemOutput) + out.ResultMetadata = metadata + return out, nil +} + +// Represents the input of a BatchGetItem operation. +type BatchGetItemInput struct { + + // A map of one or more table names or table ARNs and, for each table, a map that + // describes one or more items to retrieve from that table. Each table name or ARN + // can be used only once per BatchGetItem request. + // + // Each element in the map of items to retrieve consists of the following: + // + // - ConsistentRead - If true , a strongly consistent read is used; if false (the + // default), an eventually consistent read is used. + // + // - ExpressionAttributeNames - One or more substitution tokens for attribute + // names in the ProjectionExpression parameter. The following are some use cases + // for using ExpressionAttributeNames : + // + // - To access an attribute whose name conflicts with a DynamoDB reserved word. + // + // - To create a placeholder for repeating occurrences of an attribute name in + // an expression. + // + // - To prevent special characters in an attribute name from being + // misinterpreted in an expression. + // + // Use the # character in an expression to dereference an attribute name. For + // example, consider the following attribute name: + // + // - Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be used + // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in + // the Amazon DynamoDB Developer Guide). 
To work around this, you could specify the + // following for ExpressionAttributeNames : + // + // - {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // - #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. + // + // For more information about expression attribute names, see [Accessing Item Attributes]in the Amazon + // DynamoDB Developer Guide. + // + // - Keys - An array of primary key attribute values that define specific items + // in the table. For each primary key, you must provide all of the key attributes. + // For example, with a simple primary key, you only need to provide the partition + // key value. For a composite key, you must provide both the partition key value + // and the sort key value. + // + // - ProjectionExpression - A string that identifies one or more attributes to + // retrieve from the table. These attributes can include scalars, sets, or elements + // of a JSON document. The attributes in the expression must be separated by + // commas. + // + // If no attribute names are specified, then all attributes are returned. If any + // of the requested attributes are not found, they do not appear in the result. + // + // For more information, see [Accessing Item Attributes]in the Amazon DynamoDB Developer Guide. + // + // - AttributesToGet - This is a legacy parameter. Use ProjectionExpression + // instead. For more information, see [AttributesToGet]in the Amazon DynamoDB Developer Guide. + // + // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html + // [Accessing Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html + // [AttributesToGet]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html + // + // This member is required. + RequestItems map[string]types.KeysAndAttributes + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response: + // + // - INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // + // - TOTAL - The response includes only the aggregate ConsumedCapacity for the + // operation. + // + // - NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity types.ReturnConsumedCapacity + + noSmithyDocumentSerde +} + +func (in *BatchGetItemInput) bindEndpointParams(p *EndpointParameters) { + func() { + v1 := in.RequestItems + var v2 []string + for k := range v1 { + v2 = append(v2, k) + } + p.ResourceArnList = v2 + }() + +} + +// Represents the output of a BatchGetItem operation. +type BatchGetItemOutput struct { + + // The read capacity units consumed by the entire BatchGetItem operation. + // + // Each element consists of: + // + // - TableName - The table that consumed the provisioned throughput. + // + // - CapacityUnits - The total number of capacity units consumed. + ConsumedCapacity []types.ConsumedCapacity + + // A map of table name or table ARN to a list of items. 
Each object in Responses + // consists of a table name or ARN, along with a map of attribute data consisting + // of the data type and attribute value. + Responses map[string][]map[string]types.AttributeValue + + // A map of tables and their respective keys that were not processed with the + // current response. The UnprocessedKeys value is in the same form as RequestItems + // , so the value can be provided directly to a subsequent BatchGetItem operation. + // For more information, see RequestItems in the Request Parameters section. + // + // Each element consists of: + // + // - Keys - An array of primary key attribute values that define specific items + // in the table. + // + // - ProjectionExpression - One or more attributes to be retrieved from the table + // or index. By default, all attributes are returned. If a requested attribute is + // not found, it does not appear in the result. + // + // - ConsistentRead - The consistency of a read operation. If set to true , then + // a strongly consistent read is used; otherwise, an eventually consistent read is + // used. + // + // If there are no unprocessed keys remaining, the response contains an empty + // UnprocessedKeys map. + UnprocessedKeys map[string]types.KeysAndAttributes + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationBatchGetItemMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpBatchGetItem{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpBatchGetItem{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "BatchGetItem"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpBatchGetItemDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = 
addOpBatchGetItemValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchGetItem(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpBatchGetItemDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpBatchGetItemDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpBatchGetItemDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*BatchGetItemInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func 
newServiceMetadataMiddleware_opBatchGetItem(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "BatchGetItem", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchWriteItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchWriteItem.go new file mode 100644 index 0000000000..2056cdd927 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchWriteItem.go @@ -0,0 +1,448 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// The BatchWriteItem operation puts or deletes multiple items in one or more +// tables. A single call to BatchWriteItem can transmit up to 16MB of data over +// the network, consisting of up to 25 item put or delete operations. While +// individual items can be up to 400 KB once stored, it's important to note that an +// item's representation might be greater than 400KB while being sent in DynamoDB's +// JSON format for the API call. For more details on this distinction, see [Naming Rules and Data Types]. +// +// BatchWriteItem cannot update items. If you perform a BatchWriteItem operation +// on an existing item, that item's values will be overwritten by the operation and +// it will appear like it was updated. To update items, we recommend you use the +// UpdateItem action. +// +// The individual PutItem and DeleteItem operations specified in BatchWriteItem +// are atomic; however BatchWriteItem as a whole is not. If any requested +// operations fail because the table's provisioned throughput is exceeded or an +// internal processing failure occurs, the failed operations are returned in the +// UnprocessedItems response parameter. You can investigate and optionally resend +// the requests. Typically, you would call BatchWriteItem in a loop. Each +// iteration would check for unprocessed items and submit a new BatchWriteItem +// request with those unprocessed items until all items have been processed. +// +// For tables and indexes with provisioned capacity, if none of the items can be +// processed due to insufficient provisioned throughput on all of the tables in the +// request, then BatchWriteItem returns a ProvisionedThroughputExceededException . +// For all tables and indexes, if none of the items can be processed due to other +// throttling scenarios (such as exceeding partition level limits), then +// BatchWriteItem returns a ThrottlingException . +// +// If DynamoDB returns any unprocessed items, you should retry the batch operation +// on those items. However, we strongly recommend that you use an exponential +// backoff algorithm. If you retry the batch operation immediately, the underlying +// read or write requests can still fail due to throttling on the individual +// tables. If you delay the batch operation using exponential backoff, the +// individual requests in the batch are much more likely to succeed. +// +// For more information, see [Batch Operations and Error Handling] in the Amazon DynamoDB Developer Guide. 
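+//
+// A minimal sketch of the retry loop described above (assumptions: a
+// configured *Client named client, a context ctx, an initial requests map, and
+// a backoff helper that sleeps with exponential backoff):
+//
+//	pending := map[string][]types.WriteRequest{ /* initial put/delete requests */ }
+//	for len(pending) > 0 {
+//		out, err := client.BatchWriteItem(ctx, &BatchWriteItemInput{RequestItems: pending})
+//		if err != nil {
+//			return err
+//		}
+//		pending = out.UnprocessedItems
+//		if len(pending) > 0 {
+//			backoff() // wait before resubmitting the unprocessed requests
+//		}
+//	}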
+// +// With BatchWriteItem , you can efficiently write or delete large amounts of data, +// such as from Amazon EMR, or copy data from another database into DynamoDB. In +// order to improve performance with these large-scale operations, BatchWriteItem +// does not behave in the same way as individual PutItem and DeleteItem calls +// would. For example, you cannot specify conditions on individual put and delete +// requests, and BatchWriteItem does not return deleted items in the response. +// +// If you use a programming language that supports concurrency, you can use +// threads to write items in parallel. Your application must include the necessary +// logic to manage the threads. With languages that don't support threading, you +// must update or delete the specified items one at a time. In both situations, +// BatchWriteItem performs the specified put and delete operations in parallel, +// giving you the power of the thread pool approach without having to introduce +// complexity into your application. +// +// Parallel processing reduces latency, but each specified put and delete request +// consumes the same number of write capacity units whether it is processed in +// parallel or not. Delete operations on nonexistent items consume one write +// capacity unit. +// +// If one or more of the following is true, DynamoDB rejects the entire batch +// write operation: +// +// - One or more tables specified in the BatchWriteItem request does not exist. +// +// - Primary key attributes specified on an item in the request do not match +// those in the corresponding table's primary key schema. +// +// - You try to perform multiple operations on the same item in the same +// BatchWriteItem request. For example, you cannot put and delete the same item +// in the same BatchWriteItem request. +// +// - Your request contains at least two items with identical hash and range keys +// (which essentially is two put operations). +// +// - There are more than 25 requests in the batch. +// +// - Any individual item in a batch exceeds 400 KB. +// +// - The total request size exceeds 16 MB. +// +// - Any individual items with keys exceeding the key length limits. For a +// partition key, the limit is 2048 bytes and for a sort key, the limit is 1024 +// bytes. +// +// [Batch Operations and Error Handling]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#Programming.Errors.BatchOperations +// [Naming Rules and Data Types]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html +func (c *Client) BatchWriteItem(ctx context.Context, params *BatchWriteItemInput, optFns ...func(*Options)) (*BatchWriteItemOutput, error) { + if params == nil { + params = &BatchWriteItemInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "BatchWriteItem", params, optFns, c.addOperationBatchWriteItemMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*BatchWriteItemOutput) + out.ResultMetadata = metadata + return out, nil +} + +// Represents the input of a BatchWriteItem operation. +type BatchWriteItemInput struct { + + // A map of one or more table names or table ARNs and, for each table, a list of + // operations to be performed ( DeleteRequest or PutRequest ). Each element in the + // map consists of the following: + // + // - DeleteRequest - Perform a DeleteItem operation on the specified item. 
The + // item to be deleted is identified by a Key subelement: + // + // - Key - A map of primary key attribute values that uniquely identify the item. + // Each entry in this map consists of an attribute name and an attribute value. For + // each primary key, you must provide all of the key attributes. For example, with + // a simple primary key, you only need to provide a value for the partition key. + // For a composite primary key, you must provide values for both the partition key + // and the sort key. + // + // - PutRequest - Perform a PutItem operation on the specified item. The item to + // be put is identified by an Item subelement: + // + // - Item - A map of attributes and their values. Each entry in this map consists + // of an attribute name and an attribute value. Attribute values must not be null; + // string and binary type attributes must have lengths greater than zero; and set + // type attributes must not be empty. Requests that contain empty values are + // rejected with a ValidationException exception. + // + // If you specify any attributes that are part of an index key, then the data + // types for those attributes must match those of the schema in the table's + // attribute definition. + // + // This member is required. + RequestItems map[string][]types.WriteRequest + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response: + // + // - INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // + // - TOTAL - The response includes only the aggregate ConsumedCapacity for the + // operation. + // + // - NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity types.ReturnConsumedCapacity + + // Determines whether item collection metrics are returned. If set to SIZE , + // statistics about item collections, if any, that were modified during the + // operation are returned in the response. If set to NONE (the default), no + // statistics are returned. + ReturnItemCollectionMetrics types.ReturnItemCollectionMetrics + + noSmithyDocumentSerde +} + +func (in *BatchWriteItemInput) bindEndpointParams(p *EndpointParameters) { + func() { + v1 := in.RequestItems + var v2 []string + for k := range v1 { + v2 = append(v2, k) + } + p.ResourceArnList = v2 + }() + +} + +// Represents the output of a BatchWriteItem operation. +type BatchWriteItemOutput struct { + + // The capacity units consumed by the entire BatchWriteItem operation. + // + // Each element consists of: + // + // - TableName - The table that consumed the provisioned throughput. + // + // - CapacityUnits - The total number of capacity units consumed. + ConsumedCapacity []types.ConsumedCapacity + + // A list of tables that were processed by BatchWriteItem and, for each table, + // information about any item collections that were affected by individual + // DeleteItem or PutItem operations. + // + // Each entry consists of the following subelements: + // + // - ItemCollectionKey - The partition key value of the item collection. This is + // the same as the partition key value of the item.
+ // + // - SizeEstimateRangeGB - An estimate of item collection size, expressed in GB. + // This is a two-element array containing a lower bound and an upper bound for the + // estimate. The estimate includes the size of all the items in the table, plus the + // size of all attributes projected into all of the local secondary indexes on the + // table. Use this estimate to measure whether a local secondary index is + // approaching its size limit. + // + // The estimate is subject to change over time; therefore, do not rely on the + // precision or accuracy of the estimate. + ItemCollectionMetrics map[string][]types.ItemCollectionMetrics + + // A map of tables and requests against those tables that were not processed. The + // UnprocessedItems value is in the same form as RequestItems , so you can provide + // this value directly to a subsequent BatchWriteItem operation. For more + // information, see RequestItems in the Request Parameters section. + // + // Each UnprocessedItems entry consists of a table name or table ARN and, for that + // table, a list of operations to perform ( DeleteRequest or PutRequest ). + // + // - DeleteRequest - Perform a DeleteItem operation on the specified item. The + // item to be deleted is identified by a Key subelement: + // + // - Key - A map of primary key attribute values that uniquely identify the item. + // Each entry in this map consists of an attribute name and an attribute value. + // + // - PutRequest - Perform a PutItem operation on the specified item. The item to + // be put is identified by an Item subelement: + // + // - Item - A map of attributes and their values. Each entry in this map consists + // of an attribute name and an attribute value. Attribute values must not be null; + // string and binary type attributes must have lengths greater than zero; and set + // type attributes must not be empty. Requests that contain empty values will be + // rejected with a ValidationException exception. + // + // If you specify any attributes that are part of an index key, then the data + // types for those attributes must match those of the schema in the table's + // attribute definition. + // + // If there are no unprocessed items remaining, the response contains an empty + // UnprocessedItems map. + UnprocessedItems map[string][]types.WriteRequest + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationBatchWriteItemMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpBatchWriteItem{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpBatchWriteItem{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "BatchWriteItem"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpBatchWriteItemDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpBatchWriteItemValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchWriteItem(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if 
err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpBatchWriteItemDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpBatchWriteItemDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpBatchWriteItemDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*BatchWriteItemInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opBatchWriteItem(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "BatchWriteItem", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateBackup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateBackup.go new file mode 100644 index 0000000000..db7e077610 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateBackup.go @@ -0,0 +1,287 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a backup for an existing table. +// +// Each time you create an on-demand backup, the entire table data is backed up. +// There is no limit to the number of on-demand backups that can be taken. 
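+//
+// A minimal usage sketch (assumptions: a configured *Client named client, a
+// context ctx, and illustrative table and backup names):
+//
+//	out, err := client.CreateBackup(ctx, &CreateBackupInput{
+//		TableName:  aws.String("Music"),
+//		BackupName: aws.String("Music-backup-1"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(aws.ToString(out.BackupDetails.BackupArn))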
+// +// When you create an on-demand backup, a time marker of the request is cataloged, +// and the backup is created asynchronously, by applying all changes until the time +// of the request to the last full table snapshot. Backup requests are processed +// instantaneously and become available for restore within minutes. +// +// You can call CreateBackup at a maximum rate of 50 times per second. +// +// All backups in DynamoDB work without consuming any provisioned throughput on +// the table. +// +// If you submit a backup request on 2018-12-14 at 14:25:00, the backup is +// guaranteed to contain all data committed to the table up to 14:24:00, and data +// committed after 14:26:00 will not be. The backup might contain data +// modifications made between 14:24:00 and 14:26:00. On-demand backup does not +// support causal consistency. +// +// Along with data, the following are also included on the backups: +// +// - Global secondary indexes (GSIs) +// +// - Local secondary indexes (LSIs) +// +// - Streams +// +// - Provisioned read and write capacity +func (c *Client) CreateBackup(ctx context.Context, params *CreateBackupInput, optFns ...func(*Options)) (*CreateBackupOutput, error) { + if params == nil { + params = &CreateBackupInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateBackup", params, optFns, c.addOperationCreateBackupMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateBackupOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateBackupInput struct { + + // Specified name for the backup. + // + // This member is required. + BackupName *string + + // The name of the table. You can also provide the Amazon Resource Name (ARN) of + // the table in this parameter. + // + // This member is required. + TableName *string + + noSmithyDocumentSerde +} + +func (in *CreateBackupInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.TableName + +} + +type CreateBackupOutput struct { + + // Contains the details of the backup created for the table. + BackupDetails *types.BackupDetails + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateBackupMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpCreateBackup{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpCreateBackup{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateBackup"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreateBackupDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpCreateBackupValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateBackup(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = 
addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpCreateBackupDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpCreateBackupDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpCreateBackupDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*CreateBackupInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opCreateBackup(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateBackup", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateGlobalTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateGlobalTable.go new file mode 100644 index 0000000000..163348c828 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateGlobalTable.go @@ -0,0 +1,309 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a global table from an existing table. A global table creates a +// replication relationship between two or more DynamoDB tables with the same table +// name in the provided Regions. +// +// This documentation is for version 2017.11.29 (Legacy) of global tables, which +// should be avoided for new global tables. 
Customers should use [Global Tables version 2019.11.21 (Current)]when possible, +// because it provides greater flexibility, higher efficiency, and consumes less +// write capacity than 2017.11.29 (Legacy). +// +// To determine which version you're using, see [Determining the global table version you are using]. To update existing global tables +// from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see [Upgrading global tables]. +// +// If you want to add a new replica table to a global table, each of the following +// conditions must be true: +// +// - The table must have the same primary key as all of the other replicas. +// +// - The table must have the same name as all of the other replicas. +// +// - The table must have DynamoDB Streams enabled, with the stream containing +// both the new and the old images of the item. +// +// - None of the replica tables in the global table can contain any data. +// +// If global secondary indexes are specified, then the following conditions must +// also be met: +// +// - The global secondary indexes must have the same name. +// +// - The global secondary indexes must have the same hash key and sort key (if +// present). +// +// If local secondary indexes are specified, then the following conditions must +// also be met: +// +// - The local secondary indexes must have the same name. +// +// - The local secondary indexes must have the same hash key and sort key (if +// present). +// +// Write capacity settings should be set consistently across your replica tables +// and secondary indexes. DynamoDB strongly recommends enabling auto scaling to +// manage the write capacity settings for all of your global tables replicas and +// indexes. +// +// If you prefer to manage write capacity settings manually, you should provision +// equal replicated write capacity units to your replica tables. You should also +// provision equal replicated write capacity units to matching secondary indexes +// across your global table. +// +// [Global Tables version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html +// [Upgrading global tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html +// [Determining the global table version you are using]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html +func (c *Client) CreateGlobalTable(ctx context.Context, params *CreateGlobalTableInput, optFns ...func(*Options)) (*CreateGlobalTableOutput, error) { + if params == nil { + params = &CreateGlobalTableInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateGlobalTable", params, optFns, c.addOperationCreateGlobalTableMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateGlobalTableOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateGlobalTableInput struct { + + // The global table name. + // + // This member is required. + GlobalTableName *string + + // The Regions where the global table needs to be created. + // + // This member is required. + ReplicationGroup []types.Replica + + noSmithyDocumentSerde +} + +func (in *CreateGlobalTableInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.GlobalTableName + +} + +type CreateGlobalTableOutput struct { + + // Contains the details of the global table. + GlobalTableDescription *types.GlobalTableDescription + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateGlobalTableMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpCreateGlobalTable{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpCreateGlobalTable{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateGlobalTable"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreateGlobalTableDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpCreateGlobalTableValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateGlobalTable(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + 
return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpCreateGlobalTableDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpCreateGlobalTableDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpCreateGlobalTableDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*CreateGlobalTableInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opCreateGlobalTable(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateGlobalTable", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateTable.go new file mode 100644 index 0000000000..7d4510adb8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateTable.go @@ -0,0 +1,475 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// The CreateTable operation adds a new table to your account. In an Amazon Web +// Services account, table names must be unique within each Region. That is, you +// can have two tables with same name if you create the tables in different +// Regions. 
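+//
+// A minimal call sketch (illustrative only, not part of the generated API
+// surface): it assumes a configured *dynamodb.Client named client, a ctx
+// context.Context, and the aws and types packages imported; the "Music" table
+// and its attribute names are hypothetical. It creates a table with a composite
+// primary key and on-demand billing:
+//
+//	out, err := client.CreateTable(ctx, &dynamodb.CreateTableInput{
+//		TableName: aws.String("Music"),
+//		AttributeDefinitions: []types.AttributeDefinition{
+//			{AttributeName: aws.String("Artist"), AttributeType: types.ScalarAttributeTypeS},
+//			{AttributeName: aws.String("SongTitle"), AttributeType: types.ScalarAttributeTypeS},
+//		},
+//		KeySchema: []types.KeySchemaElement{
+//			{AttributeName: aws.String("Artist"), KeyType: types.KeyTypeHash},
+//			{AttributeName: aws.String("SongTitle"), KeyType: types.KeyTypeRange},
+//		},
+//		BillingMode: types.BillingModePayPerRequest,
+//	})
+//
+// The call returns while the table is still CREATING; callers typically wait
+// for ACTIVE, for example with dynamodb.NewTableExistsWaiter, before writing.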
+// +// CreateTable is an asynchronous operation. Upon receiving a CreateTable request, +// DynamoDB immediately returns a response with a TableStatus of CREATING . After +// the table is created, DynamoDB sets the TableStatus to ACTIVE . You can perform +// read and write operations only on an ACTIVE table. +// +// You can optionally define secondary indexes on the new table, as part of the +// CreateTable operation. If you want to create multiple tables with secondary +// indexes on them, you must create the tables sequentially. Only one table with +// secondary indexes can be in the CREATING state at any given time. +// +// You can use the DescribeTable action to check the table status. +func (c *Client) CreateTable(ctx context.Context, params *CreateTableInput, optFns ...func(*Options)) (*CreateTableOutput, error) { + if params == nil { + params = &CreateTableInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateTable", params, optFns, c.addOperationCreateTableMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateTableOutput) + out.ResultMetadata = metadata + return out, nil +} + +// Represents the input of a CreateTable operation. +type CreateTableInput struct { + + // An array of attributes that describe the key schema for the table and indexes. + // + // This member is required. + AttributeDefinitions []types.AttributeDefinition + + // Specifies the attributes that make up the primary key for a table or an index. + // The attributes in KeySchema must also be defined in the AttributeDefinitions + // array. For more information, see [Data Model]in the Amazon DynamoDB Developer Guide. + // + // Each KeySchemaElement in the array is composed of: + // + // - AttributeName - The name of this key attribute. + // + // - KeyType - The role that the key attribute will assume: + // + // - HASH - partition key + // + // - RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. The term + // "hash attribute" derives from the DynamoDB usage of an internal hash function to + // evenly distribute data items across partitions, based on their partition key + // values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + // + // For a simple primary key (partition key), you must provide exactly one element + // with a KeyType of HASH . + // + // For a composite primary key (partition key and sort key), you must provide + // exactly two elements, in this order: The first element must have a KeyType of + // HASH , and the second element must have a KeyType of RANGE . + // + // For more information, see [Working with Tables] in the Amazon DynamoDB Developer Guide. + // + // [Data Model]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html + // [Working with Tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#WorkingWithTables.primary.key + // + // This member is required. + KeySchema []types.KeySchemaElement + + // The name of the table to create. You can also provide the Amazon Resource Name + // (ARN) of the table in this parameter. + // + // This member is required. + TableName *string + + // Controls how you are charged for read and write throughput and how you manage + // capacity. This setting can be changed later. 
+ // + // - PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for most DynamoDB + // workloads. PAY_PER_REQUEST sets the billing mode to [On-demand capacity mode]. + // + // - PROVISIONED - We recommend using PROVISIONED for steady workloads with + // predictable growth where capacity requirements can be reliably forecasted. + // PROVISIONED sets the billing mode to [Provisioned capacity mode]. + // + // [Provisioned capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html + // [On-demand capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/on-demand-capacity-mode.html + BillingMode types.BillingMode + + // Indicates whether deletion protection is to be enabled (true) or disabled + // (false) on the table. + DeletionProtectionEnabled *bool + + // One or more global secondary indexes (the maximum is 20) to be created on the + // table. Each global secondary index in the array includes the following: + // + // - IndexName - The name of the global secondary index. Must be unique only for + // this table. + // + // - KeySchema - Specifies the key schema for the global secondary index. + // + // - Projection - Specifies attributes that are copied (projected) from the table + // into the index. These are in addition to the primary key attributes and index + // key attributes, which are automatically projected. Each attribute specification + // is composed of: + // + // - ProjectionType - One of the following: + // + // - KEYS_ONLY - Only the index and primary keys are projected into the index. + // + // - INCLUDE - Only the specified table attributes are projected into the index. + // The list of projected attributes is in NonKeyAttributes . + // + // - ALL - All of the table attributes are projected into the index. + // + // - NonKeyAttributes - A list of one or more non-key attribute names that are + // projected into the secondary index. The total count of attributes provided in + // NonKeyAttributes , summed across all of the secondary indexes, must not exceed + // 100. If you project the same attribute into two different indexes, this counts + // as two distinct attributes when determining the total. This limit only applies + // when you specify the ProjectionType of INCLUDE . You still can specify the + // ProjectionType of ALL to project all attributes from the source table, even if + // the table has more than 100 attributes. + // + // - ProvisionedThroughput - The provisioned throughput settings for the global + // secondary index, consisting of read and write capacity units. + GlobalSecondaryIndexes []types.GlobalSecondaryIndex + + // One or more local secondary indexes (the maximum is 5) to be created on the + // table. Each index is scoped to a given partition key value. There is a 10 GB + // size limit per partition key value; otherwise, the size of a local secondary + // index is unconstrained. + // + // Each local secondary index in the array includes the following: + // + // - IndexName - The name of the local secondary index. Must be unique only for + // this table. + // + // - KeySchema - Specifies the key schema for the local secondary index. The key + // schema must begin with the same partition key as the table. + // + // - Projection - Specifies attributes that are copied (projected) from the table + // into the index. These are in addition to the primary key attributes and index + // key attributes, which are automatically projected. 
Each attribute specification + // is composed of: + // + // - ProjectionType - One of the following: + // + // - KEYS_ONLY - Only the index and primary keys are projected into the index. + // + // - INCLUDE - Only the specified table attributes are projected into the index. + // The list of projected attributes is in NonKeyAttributes . + // + // - ALL - All of the table attributes are projected into the index. + // + // - NonKeyAttributes - A list of one or more non-key attribute names that are + // projected into the secondary index. The total count of attributes provided in + // NonKeyAttributes , summed across all of the secondary indexes, must not exceed + // 100. If you project the same attribute into two different indexes, this counts + // as two distinct attributes when determining the total. This limit only applies + // when you specify the ProjectionType of INCLUDE . You still can specify the + // ProjectionType of ALL to project all attributes from the source table, even if + // the table has more than 100 attributes. + LocalSecondaryIndexes []types.LocalSecondaryIndex + + // Sets the maximum number of read and write units for the specified table in + // on-demand capacity mode. If you use this parameter, you must specify + // MaxReadRequestUnits , MaxWriteRequestUnits , or both. + OnDemandThroughput *types.OnDemandThroughput + + // Represents the provisioned throughput settings for a specified table or index. + // The settings can be modified using the UpdateTable operation. + // + // If you set BillingMode as PROVISIONED , you must specify this property. If you + // set BillingMode as PAY_PER_REQUEST , you cannot specify this property. + // + // For current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas] in the + // Amazon DynamoDB Developer Guide. + // + // [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html + ProvisionedThroughput *types.ProvisionedThroughput + + // An Amazon Web Services resource-based policy document in JSON format that will + // be attached to the table. + // + // When you attach a resource-based policy while creating a table, the policy + // application is strongly consistent. + // + // The maximum size supported for a resource-based policy document is 20 KB. + // DynamoDB counts whitespaces when calculating the size of a policy against this + // limit. For a full list of all considerations that apply for resource-based + // policies, see [Resource-based policy considerations]. + // + // You need to specify the CreateTable and PutResourcePolicy IAM actions for + // authorizing a user to create a table with a resource-based policy. + // + // [Resource-based policy considerations]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-considerations.html + ResourcePolicy *string + + // Represents the settings used to enable server-side encryption. + SSESpecification *types.SSESpecification + + // The settings for DynamoDB Streams on the table. These settings consist of: + // + // - StreamEnabled - Indicates whether DynamoDB Streams is to be enabled (true) + // or disabled (false). + // + // - StreamViewType - When an item in the table is modified, StreamViewType + // determines what information is written to the table's stream. Valid values for + // StreamViewType are: + // + // - KEYS_ONLY - Only the key attributes of the modified item are written to the + // stream. 
+ // + // - NEW_IMAGE - The entire item, as it appears after it was modified, is written + // to the stream. + // + // - OLD_IMAGE - The entire item, as it appeared before it was modified, is + // written to the stream. + // + // - NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are + // written to the stream. + StreamSpecification *types.StreamSpecification + + // The table class of the new table. Valid values are STANDARD and + // STANDARD_INFREQUENT_ACCESS . + TableClass types.TableClass + + // A list of key-value pairs to label the table. For more information, see [Tagging for DynamoDB]. + // + // [Tagging for DynamoDB]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html + Tags []types.Tag + + // Represents the warm throughput (in read units per second and write units per + // second) for creating a table. + WarmThroughput *types.WarmThroughput + + noSmithyDocumentSerde +} + +func (in *CreateTableInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.TableName + +} + +// Represents the output of a CreateTable operation. +type CreateTableOutput struct { + + // Represents the properties of the table. + TableDescription *types.TableDescription + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateTableMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpCreateTable{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpCreateTable{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateTable"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreateTableDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = 
addOpCreateTableValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateTable(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpCreateTableDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpCreateTableDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpCreateTableDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*CreateTableInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func 
newServiceMetadataMiddleware_opCreateTable(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateTable", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteBackup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteBackup.go new file mode 100644 index 0000000000..799be66b63 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteBackup.go @@ -0,0 +1,254 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes an existing backup of a table. +// +// You can call DeleteBackup at a maximum rate of 10 times per second. +func (c *Client) DeleteBackup(ctx context.Context, params *DeleteBackupInput, optFns ...func(*Options)) (*DeleteBackupOutput, error) { + if params == nil { + params = &DeleteBackupInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBackup", params, optFns, c.addOperationDeleteBackupMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBackupOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBackupInput struct { + + // The ARN associated with the backup. + // + // This member is required. + BackupArn *string + + noSmithyDocumentSerde +} + +func (in *DeleteBackupInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.BackupArn + +} + +type DeleteBackupOutput struct { + + // Contains the description of the backup created for the table. + BackupDescription *types.BackupDescription + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBackupMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpDeleteBackup{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDeleteBackup{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBackup"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteBackupDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpDeleteBackupValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBackup(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = 
addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpDeleteBackupDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpDeleteBackupDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpDeleteBackupDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*DeleteBackupInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opDeleteBackup(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteBackup", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteItem.go new file mode 100644 index 0000000000..386e2ecf1d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteItem.go @@ -0,0 +1,450 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes a single item in a table by primary key. You can perform a conditional +// delete operation that deletes the item if it exists, or if it has an expected +// attribute value. +// +// In addition to deleting an item, you can also return the item's attribute +// values in the same operation, using the ReturnValues parameter. 
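+//
+// A minimal call sketch (illustrative; assumes a configured *dynamodb.Client
+// named client, a ctx context.Context, and the aws and types packages; the
+// table, key, and attribute names are hypothetical). ReturnValues asks DynamoDB
+// to return the item as it looked before the deletion:
+//
+//	out, err := client.DeleteItem(ctx, &dynamodb.DeleteItemInput{
+//		TableName: aws.String("Music"),
+//		Key: map[string]types.AttributeValue{
+//			"Artist":    &types.AttributeValueMemberS{Value: "Acme Band"},
+//			"SongTitle": &types.AttributeValueMemberS{Value: "Happy Day"},
+//		},
+//		ConditionExpression: aws.String("attribute_exists(Artist)"),
+//		ReturnValues:        types.ReturnValueAllOld,
+//	})
+//	if err == nil {
+//		_ = out.Attributes // the deleted item's old image (ALL_OLD)
+//	}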
+// +// Unless you specify conditions, the DeleteItem is an idempotent operation; +// running it multiple times on the same item or attribute does not result in an +// error response. +// +// Conditional deletes are useful for deleting items only if specific conditions +// are met. If those conditions are met, DynamoDB performs the delete. Otherwise, +// the item is not deleted. +func (c *Client) DeleteItem(ctx context.Context, params *DeleteItemInput, optFns ...func(*Options)) (*DeleteItemOutput, error) { + if params == nil { + params = &DeleteItemInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteItem", params, optFns, c.addOperationDeleteItemMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteItemOutput) + out.ResultMetadata = metadata + return out, nil +} + +// Represents the input of a DeleteItem operation. +type DeleteItemInput struct { + + // A map of attribute names to AttributeValue objects, representing the primary + // key of the item to delete. + // + // For the primary key, you must provide all of the key attributes. For example, + // with a simple primary key, you only need to provide a value for the partition + // key. For a composite primary key, you must provide values for both the partition + // key and the sort key. + // + // This member is required. + Key map[string]types.AttributeValue + + // The name of the table from which to delete the item. You can also provide the + // Amazon Resource Name (ARN) of the table in this parameter. + // + // This member is required. + TableName *string + + // A condition that must be satisfied in order for a conditional DeleteItem to + // succeed. + // + // An expression can contain any of the following: + // + // - Functions: attribute_exists | attribute_not_exists | attribute_type | + // contains | begins_with | size + // + // These function names are case-sensitive. + // + // - Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN + // + // - Logical operators: AND | OR | NOT + // + // For more information about condition expressions, see [Condition Expressions] in the Amazon DynamoDB + // Developer Guide. + // + // [Condition Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html + ConditionExpression *string + + // This is a legacy parameter. Use ConditionExpression instead. For more + // information, see [ConditionalOperator]in the Amazon DynamoDB Developer Guide. + // + // [ConditionalOperator]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html + ConditionalOperator types.ConditionalOperator + + // This is a legacy parameter. Use ConditionExpression instead. For more + // information, see [Expected]in the Amazon DynamoDB Developer Guide. + // + // [Expected]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html + Expected map[string]types.ExpectedAttributeValue + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames : + // + // - To access an attribute whose name conflicts with a DynamoDB reserved word. + // + // - To create a placeholder for repeating occurrences of an attribute name in + // an expression. + // + // - To prevent special characters in an attribute name from being + // misinterpreted in an expression. + // + // Use the # character in an expression to dereference an attribute name. 
For + // example, consider the following attribute name: + // + // - Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be used + // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in the + // Amazon DynamoDB Developer Guide). To work around this, you could specify the + // following for ExpressionAttributeNames : + // + // - {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // - #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see [Specifying Item Attributes] in the Amazon DynamoDB + // Developer Guide. + // + // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html + // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html + ExpressionAttributeNames map[string]string + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute value. + // For example, suppose that you wanted to check whether the value of the + // ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, + // ":disc":{"S":"Discontinued"} } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see [Condition Expressions] in the Amazon + // DynamoDB Developer Guide. + // + // [Condition Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html + ExpressionAttributeValues map[string]types.AttributeValue + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response: + // + // - INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // + // - TOTAL - The response includes only the aggregate ConsumedCapacity for the + // operation. + // + // - NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity types.ReturnConsumedCapacity + + // Determines whether item collection metrics are returned. If set to SIZE , the + // response includes statistics about item collections, if any, that were modified + // during the operation are returned in the response. If set to NONE (the + // default), no statistics are returned. + ReturnItemCollectionMetrics types.ReturnItemCollectionMetrics + + // Use ReturnValues if you want to get the item attributes as they appeared before + // they were deleted. For DeleteItem , the valid values are: + // + // - NONE - If ReturnValues is not specified, or if its value is NONE , then + // nothing is returned. (This setting is the default for ReturnValues .) 
+ // + // - ALL_OLD - The content of the old item is returned. + // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. + // + // The ReturnValues parameter is used by several DynamoDB operations; however, + // DeleteItem does not recognize any values other than NONE or ALL_OLD . + ReturnValues types.ReturnValue + + // An optional parameter that returns the item attributes for a DeleteItem + // operation that failed a condition check. + // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. + ReturnValuesOnConditionCheckFailure types.ReturnValuesOnConditionCheckFailure + + noSmithyDocumentSerde +} + +func (in *DeleteItemInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.TableName + +} + +// Represents the output of a DeleteItem operation. +type DeleteItemOutput struct { + + // A map of attribute names to AttributeValue objects, representing the item as it + // appeared before the DeleteItem operation. This map appears in the response only + // if ReturnValues was specified as ALL_OLD in the request. + Attributes map[string]types.AttributeValue + + // The capacity units consumed by the DeleteItem operation. The data returned + // includes the total provisioned throughput consumed, along with statistics for + // the table and any indexes involved in the operation. ConsumedCapacity is only + // returned if the ReturnConsumedCapacity parameter was specified. For more + // information, see [Provisioned capacity mode]in the Amazon DynamoDB Developer Guide. + // + // [Provisioned capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html + ConsumedCapacity *types.ConsumedCapacity + + // Information about item collections, if any, that were affected by the DeleteItem + // operation. ItemCollectionMetrics is only returned if the + // ReturnItemCollectionMetrics parameter was specified. If the table does not have + // any local secondary indexes, this information is not returned in the response. + // + // Each ItemCollectionMetrics element consists of: + // + // - ItemCollectionKey - The partition key value of the item collection. This is + // the same as the partition key value of the item itself. + // + // - SizeEstimateRangeGB - An estimate of item collection size, in gigabytes. + // This value is a two-element array containing a lower bound and an upper bound + // for the estimate. The estimate includes the size of all the items in the table, + // plus the size of all attributes projected into all of the local secondary + // indexes on that table. Use this estimate to measure whether a local secondary + // index is approaching its size limit. + // + // The estimate is subject to change over time; therefore, do not rely on the + // precision or accuracy of the estimate. + ItemCollectionMetrics *types.ItemCollectionMetrics + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteItemMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpDeleteItem{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDeleteItem{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteItem"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteItemDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpDeleteItemValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteItem(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = 
addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpDeleteItemDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpDeleteItemDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpDeleteItemDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*DeleteItemInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opDeleteItem(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteItem", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteResourcePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteResourcePolicy.go new file mode 100644 index 0000000000..768fb574dc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteResourcePolicy.go @@ -0,0 +1,281 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the resource-based policy attached to the resource, which can be a +// table or stream. +// +// DeleteResourcePolicy is an idempotent operation; running it multiple times on +// the same resource doesn't result in an error response, unless you specify an +// ExpectedRevisionId , which will then return a PolicyNotFoundException . 
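+//
+// A minimal call sketch (illustrative; assumes a configured *dynamodb.Client
+// named client and a ctx context.Context; the ARN and revision ID are
+// hypothetical placeholders). ExpectedRevisionId makes the delete conditional
+// on the policy revision currently attached to the resource:
+//
+//	_, err := client.DeleteResourcePolicy(ctx, &dynamodb.DeleteResourcePolicyInput{
+//		ResourceArn:        aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/Music"),
+//		ExpectedRevisionId: aws.String("1700000000000"),
+//	})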
+// +// To make sure that you don't inadvertently lock yourself out of your own +// resources, the root principal in your Amazon Web Services account can perform +// DeleteResourcePolicy requests, even if your resource-based policy explicitly +// denies the root principal's access. +// +// DeleteResourcePolicy is an asynchronous operation. If you issue a +// GetResourcePolicy request immediately after running the DeleteResourcePolicy +// request, DynamoDB might still return the deleted policy. This is because the +// policy for your resource might not have been deleted yet. Wait for a few +// seconds, and then try the GetResourcePolicy request again. +func (c *Client) DeleteResourcePolicy(ctx context.Context, params *DeleteResourcePolicyInput, optFns ...func(*Options)) (*DeleteResourcePolicyOutput, error) { + if params == nil { + params = &DeleteResourcePolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteResourcePolicy", params, optFns, c.addOperationDeleteResourcePolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteResourcePolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteResourcePolicyInput struct { + + // The Amazon Resource Name (ARN) of the DynamoDB resource from which the policy + // will be removed. The resources you can specify include tables and streams. If + // you remove the policy of a table, it will also remove the permissions for the + // table's indexes defined in that policy document. This is because index + // permissions are defined in the table's policy. + // + // This member is required. + ResourceArn *string + + // A string value that you can use to conditionally delete your policy. When you + // provide an expected revision ID, if the revision ID of the existing policy on + // the resource doesn't match or if there's no policy attached to the resource, the + // request will fail and return a PolicyNotFoundException . + ExpectedRevisionId *string + + noSmithyDocumentSerde +} + +func (in *DeleteResourcePolicyInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.ResourceArn + +} + +type DeleteResourcePolicyOutput struct { + + // A unique string that represents the revision ID of the policy. If you're + // comparing revision IDs, make sure to always use string comparison logic. + // + // This value will be empty if you make a request against a resource without a + // policy. + RevisionId *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteResourcePolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpDeleteResourcePolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDeleteResourcePolicy{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteResourcePolicy"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteResourcePolicyDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpDeleteResourcePolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteResourcePolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, 
options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpDeleteResourcePolicyDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpDeleteResourcePolicyDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpDeleteResourcePolicyDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*DeleteResourcePolicyInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opDeleteResourcePolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteResourcePolicy", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteTable.go new file mode 100644 index 0000000000..a9dbd30779 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteTable.go @@ -0,0 +1,275 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// The DeleteTable operation deletes a table and all of its items. After a +// DeleteTable request, the specified table is in the DELETING state until +// DynamoDB completes the deletion. If the table is in the ACTIVE state, you can +// delete it. 
If a table is in the CREATING or UPDATING state, then DynamoDB returns
+// a ResourceInUseException . If the specified table does not exist, DynamoDB
+// returns a ResourceNotFoundException . If the table is already in the DELETING
+// state, no error is returned.
+//
+// DynamoDB might continue to accept read and write operations, such as GetItem
+// and PutItem , on a table in the DELETING state until the table deletion is
+// complete. For the full list of table states, see [TableStatus].
+//
+// When you delete a table, any indexes on that table are also deleted.
+//
+// If you have DynamoDB Streams enabled on the table, then the corresponding
+// stream on that table goes into the DISABLED state, and the stream is
+// automatically deleted after 24 hours.
+//
+// Use the DescribeTable action to check the status of the table.
+//
+// [TableStatus]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TableDescription.html#DDB-Type-TableDescription-TableStatus
+func (c *Client) DeleteTable(ctx context.Context, params *DeleteTableInput, optFns ...func(*Options)) (*DeleteTableOutput, error) {
+	if params == nil {
+		params = &DeleteTableInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "DeleteTable", params, optFns, c.addOperationDeleteTableMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*DeleteTableOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+// Represents the input of a DeleteTable operation.
+type DeleteTableInput struct {
+
+	// The name of the table to delete. You can also provide the Amazon Resource Name
+	// (ARN) of the table in this parameter.
+	//
+	// This member is required.
+	TableName *string
+
+	noSmithyDocumentSerde
+}
+
+func (in *DeleteTableInput) bindEndpointParams(p *EndpointParameters) {
+
+	p.ResourceArn = in.TableName
+
+}
+
+// Represents the output of a DeleteTable operation.
+type DeleteTableOutput struct {
+
+	// Represents the properties of a table.
+	TableDescription *types.TableDescription
+
+	// Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteTableMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpDeleteTable{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDeleteTable{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteTable"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteTableDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpDeleteTableValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteTable(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = 
addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpDeleteTableDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpDeleteTableDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpDeleteTableDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*DeleteTableInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opDeleteTable(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteTable", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeBackup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeBackup.go new file mode 100644 index 0000000000..be01e64070 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeBackup.go @@ -0,0 +1,254 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Describes an existing backup of a table. +// +// You can call DescribeBackup at a maximum rate of 10 times per second. 
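+//
+// Illustrative usage sketch, not part of the generated file: a minimal caller,
+// assuming an aws.Config named cfg (for example from config.LoadDefaultConfig),
+// a context.Context named ctx, a hypothetical backup ARN, and the aws,
+// dynamodb, and fmt packages imported. It shows how the BackupArn input below
+// feeds a DescribeBackup call and how the BackupDescription output is read.
+//
+//	client := dynamodb.NewFromConfig(cfg)
+//	out, err := client.DescribeBackup(ctx, &dynamodb.DescribeBackupInput{
+//		BackupArn: aws.String("arn:aws:dynamodb:us-west-2:123456789012:table/Music/backup/01489602797149-73d8d5bc"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(out.BackupDescription.BackupDetails.BackupStatus)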
+func (c *Client) DescribeBackup(ctx context.Context, params *DescribeBackupInput, optFns ...func(*Options)) (*DescribeBackupOutput, error) { + if params == nil { + params = &DescribeBackupInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeBackup", params, optFns, c.addOperationDescribeBackupMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeBackupOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeBackupInput struct { + + // The Amazon Resource Name (ARN) associated with the backup. + // + // This member is required. + BackupArn *string + + noSmithyDocumentSerde +} + +func (in *DescribeBackupInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.BackupArn + +} + +type DescribeBackupOutput struct { + + // Contains the description of the backup created for the table. + BackupDescription *types.BackupDescription + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeBackupMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpDescribeBackup{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDescribeBackup{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeBackup"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDescribeBackupDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpDescribeBackupValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeBackup(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = 
addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpDescribeBackupDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpDescribeBackupDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpDescribeBackupDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*DescribeBackupInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opDescribeBackup(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeBackup", + } +} diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContinuousBackups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContinuousBackups.go new file mode 100644 index 0000000000..b814e0517f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContinuousBackups.go @@ -0,0 +1,270 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Checks the status of continuous backups and point in time recovery on the +// specified table. Continuous backups are ENABLED on all tables at table +// creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will +// be set to ENABLED. +// +// After continuous backups and point in time recovery are enabled, you can +// restore to any point in time within EarliestRestorableDateTime and +// LatestRestorableDateTime . +// +// LatestRestorableDateTime is typically 5 minutes before the current time. You +// can restore your table to any point in time in the last 35 days. You can set the +// recovery period to any value between 1 and 35 days. +// +// You can call DescribeContinuousBackups at a maximum rate of 10 times per second. +func (c *Client) DescribeContinuousBackups(ctx context.Context, params *DescribeContinuousBackupsInput, optFns ...func(*Options)) (*DescribeContinuousBackupsOutput, error) { + if params == nil { + params = &DescribeContinuousBackupsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeContinuousBackups", params, optFns, c.addOperationDescribeContinuousBackupsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeContinuousBackupsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeContinuousBackupsInput struct { + + // Name of the table for which the customer wants to check the continuous backups + // and point in time recovery settings. + // + // You can also provide the Amazon Resource Name (ARN) of the table in this + // parameter. + // + // This member is required. + TableName *string + + noSmithyDocumentSerde +} + +func (in *DescribeContinuousBackupsInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.TableName + +} + +type DescribeContinuousBackupsOutput struct { + + // Represents the continuous backups and point in time recovery settings on the + // table. + ContinuousBackupsDescription *types.ContinuousBackupsDescription + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeContinuousBackupsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpDescribeContinuousBackups{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDescribeContinuousBackups{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeContinuousBackups"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDescribeContinuousBackupsDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpDescribeContinuousBackupsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeContinuousBackups(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = 
addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpDescribeContinuousBackupsDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpDescribeContinuousBackupsDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpDescribeContinuousBackupsDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*DescribeContinuousBackupsInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opDescribeContinuousBackups(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeContinuousBackups", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContributorInsights.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContributorInsights.go new file mode 100644 index 0000000000..338ff4df13 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContributorInsights.go @@ -0,0 +1,246 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Returns information about contributor insights for a given table or global +// secondary index. 
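+//
+// Illustrative usage sketch, not part of the generated file: a minimal caller,
+// assuming an already-constructed *dynamodb.Client named client, a
+// context.Context named ctx, a hypothetical table name, and the aws, dynamodb,
+// and fmt packages imported. It shows how the TableName (and optional
+// IndexName) input below feeds a DescribeContributorInsights call and how the
+// status on the output is read.
+//
+//	out, err := client.DescribeContributorInsights(ctx, &dynamodb.DescribeContributorInsightsInput{
+//		TableName: aws.String("Music"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(out.ContributorInsightsStatus)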
+func (c *Client) DescribeContributorInsights(ctx context.Context, params *DescribeContributorInsightsInput, optFns ...func(*Options)) (*DescribeContributorInsightsOutput, error) { + if params == nil { + params = &DescribeContributorInsightsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeContributorInsights", params, optFns, c.addOperationDescribeContributorInsightsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeContributorInsightsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeContributorInsightsInput struct { + + // The name of the table to describe. You can also provide the Amazon Resource + // Name (ARN) of the table in this parameter. + // + // This member is required. + TableName *string + + // The name of the global secondary index to describe, if applicable. + IndexName *string + + noSmithyDocumentSerde +} + +func (in *DescribeContributorInsightsInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.TableName + +} + +type DescribeContributorInsightsOutput struct { + + // The mode of CloudWatch Contributor Insights for DynamoDB that determines which + // events are emitted. Can be set to track all access and throttled events or + // throttled events only. + ContributorInsightsMode types.ContributorInsightsMode + + // List of names of the associated contributor insights rules. + ContributorInsightsRuleList []string + + // Current status of contributor insights. + ContributorInsightsStatus types.ContributorInsightsStatus + + // Returns information about the last failure that was encountered. + // + // The most common exceptions for a FAILED status are: + // + // - LimitExceededException - Per-account Amazon CloudWatch Contributor Insights + // rule limit reached. Please disable Contributor Insights for other tables/indexes + // OR disable Contributor Insights rules before retrying. + // + // - AccessDeniedException - Amazon CloudWatch Contributor Insights rules cannot + // be modified due to insufficient permissions. + // + // - AccessDeniedException - Failed to create service-linked role for + // Contributor Insights due to insufficient permissions. + // + // - InternalServerError - Failed to create Amazon CloudWatch Contributor + // Insights rules. Please retry request. + FailureException *types.FailureException + + // The name of the global secondary index being described. + IndexName *string + + // Timestamp of the last time the status was changed. + LastUpdateDateTime *time.Time + + // The name of the table being described. + TableName *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeContributorInsightsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpDescribeContributorInsights{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDescribeContributorInsights{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeContributorInsights"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpDescribeContributorInsightsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeContributorInsights(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = 
addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeContributorInsights(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeContributorInsights", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeEndpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeEndpoints.go new file mode 100644 index 0000000000..0e7c17b82f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeEndpoints.go @@ -0,0 +1,195 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the regional endpoint information. For more information on policy +// permissions, please see [Internetwork traffic privacy]. +// +// [Internetwork traffic privacy]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/inter-network-traffic-privacy.html#inter-network-traffic-DescribeEndpoints +func (c *Client) DescribeEndpoints(ctx context.Context, params *DescribeEndpointsInput, optFns ...func(*Options)) (*DescribeEndpointsOutput, error) { + if params == nil { + params = &DescribeEndpointsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeEndpoints", params, optFns, c.addOperationDescribeEndpointsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeEndpointsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeEndpointsInput struct { + noSmithyDocumentSerde +} + +type DescribeEndpointsOutput struct { + + // List of endpoints. + // + // This member is required. + Endpoints []types.Endpoint + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeEndpointsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpDescribeEndpoints{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDescribeEndpoints{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeEndpoints"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeEndpoints(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = 
addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeEndpoints(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeEndpoints", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeExport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeExport.go new file mode 100644 index 0000000000..22eba3e4ab --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeExport.go @@ -0,0 +1,205 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Describes an existing table export. +func (c *Client) DescribeExport(ctx context.Context, params *DescribeExportInput, optFns ...func(*Options)) (*DescribeExportOutput, error) { + if params == nil { + params = &DescribeExportInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeExport", params, optFns, c.addOperationDescribeExportMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeExportOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeExportInput struct { + + // The Amazon Resource Name (ARN) associated with the export. + // + // This member is required. + ExportArn *string + + noSmithyDocumentSerde +} + +func (in *DescribeExportInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.ExportArn + +} + +type DescribeExportOutput struct { + + // Represents the properties of the export. + ExportDescription *types.ExportDescription + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeExportMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpDescribeExport{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDescribeExport{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeExport"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpDescribeExportValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeExport(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = 
addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeExport(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeExport", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeGlobalTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeGlobalTable.go new file mode 100644 index 0000000000..95b1fb4e63 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeGlobalTable.go @@ -0,0 +1,264 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns information about the specified global table. +// +// This documentation is for version 2017.11.29 (Legacy) of global tables, which +// should be avoided for new global tables. Customers should use [Global Tables version 2019.11.21 (Current)]when possible, +// because it provides greater flexibility, higher efficiency, and consumes less +// write capacity than 2017.11.29 (Legacy). +// +// To determine which version you're using, see [Determining the global table version you are using]. To update existing global tables +// from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see [Upgrading global tables]. +// +// [Global Tables version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html +// [Upgrading global tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html +// [Determining the global table version you are using]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html +func (c *Client) DescribeGlobalTable(ctx context.Context, params *DescribeGlobalTableInput, optFns ...func(*Options)) (*DescribeGlobalTableOutput, error) { + if params == nil { + params = &DescribeGlobalTableInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeGlobalTable", params, optFns, c.addOperationDescribeGlobalTableMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeGlobalTableOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeGlobalTableInput struct { + + // The name of the global table. + // + // This member is required. + GlobalTableName *string + + noSmithyDocumentSerde +} + +func (in *DescribeGlobalTableInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.GlobalTableName + +} + +type DescribeGlobalTableOutput struct { + + // Contains the details of the global table. 
+ GlobalTableDescription *types.GlobalTableDescription + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeGlobalTableMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpDescribeGlobalTable{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDescribeGlobalTable{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeGlobalTable"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDescribeGlobalTableDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpDescribeGlobalTableValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeGlobalTable(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = 
addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpDescribeGlobalTableDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpDescribeGlobalTableDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpDescribeGlobalTableDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*DescribeGlobalTableInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opDescribeGlobalTable(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeGlobalTable", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeGlobalTableSettings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeGlobalTableSettings.go new file mode 100644 index 0000000000..78378b4c88 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeGlobalTableSettings.go @@ -0,0 +1,267 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Describes Region-specific settings for a global table. 
+// +// This documentation is for version 2017.11.29 (Legacy) of global tables, which +// should be avoided for new global tables. Customers should use [Global Tables version 2019.11.21 (Current)] when possible, +// because it provides greater flexibility, higher efficiency, and consumes less +// write capacity than 2017.11.29 (Legacy). +// +// To determine which version you're using, see [Determining the global table version you are using]. To update existing global tables +// from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see [Upgrading global tables]. +// +// [Global Tables version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html +// [Upgrading global tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html +// [Determining the global table version you are using]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html +func (c *Client) DescribeGlobalTableSettings(ctx context.Context, params *DescribeGlobalTableSettingsInput, optFns ...func(*Options)) (*DescribeGlobalTableSettingsOutput, error) { + if params == nil { + params = &DescribeGlobalTableSettingsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeGlobalTableSettings", params, optFns, c.addOperationDescribeGlobalTableSettingsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeGlobalTableSettingsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeGlobalTableSettingsInput struct { + + // The name of the global table to describe. + // + // This member is required. + GlobalTableName *string + + noSmithyDocumentSerde +} + +func (in *DescribeGlobalTableSettingsInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.GlobalTableName + +} + +type DescribeGlobalTableSettingsOutput struct { + + // The name of the global table. + GlobalTableName *string + + // The Region-specific settings for the global table. + ReplicaSettings []types.ReplicaSettingsDescription + + // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeGlobalTableSettingsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpDescribeGlobalTableSettings{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDescribeGlobalTableSettings{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeGlobalTableSettings"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDescribeGlobalTableSettingsDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpDescribeGlobalTableSettingsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeGlobalTableSettings(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } 
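+	// The interceptor hooks below are registered in request-lifecycle order,
+	// bracketing signing, transmit, and deserialization; the span middlewares
+	// after them mark the start and end of the initialize and build-request
+	// trace spans.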
+ if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpDescribeGlobalTableSettingsDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpDescribeGlobalTableSettingsDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpDescribeGlobalTableSettingsDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*DescribeGlobalTableSettingsInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opDescribeGlobalTableSettings(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeGlobalTableSettings", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeImport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeImport.go new file mode 100644 index 0000000000..a4c70e5d5b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeImport.go @@ -0,0 +1,209 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Represents the properties of the import. 
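+//
+// A minimal usage sketch, assuming a configured *Client named client and a
+// hypothetical placeholder import ARN:
+//
+//	out, err := client.DescribeImport(ctx, &dynamodb.DescribeImportInput{
+//		ImportArn: aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/Music/import/01234567890123-abcdefghijkl"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(out.ImportTableDescription.ImportStatus)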
+func (c *Client) DescribeImport(ctx context.Context, params *DescribeImportInput, optFns ...func(*Options)) (*DescribeImportOutput, error) { + if params == nil { + params = &DescribeImportInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeImport", params, optFns, c.addOperationDescribeImportMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeImportOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeImportInput struct { + + // The Amazon Resource Name (ARN) associated with the table you're importing to. + // + // This member is required. + ImportArn *string + + noSmithyDocumentSerde +} + +func (in *DescribeImportInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.ImportArn + +} + +type DescribeImportOutput struct { + + // Represents the properties of the table created for the import, and parameters + // of the import. The import parameters include import status, how many items were + // processed, and how many errors were encountered. + // + // This member is required. + ImportTableDescription *types.ImportTableDescription + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeImportMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpDescribeImport{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDescribeImport{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeImport"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpDescribeImportValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeImport(options.Region), middleware.Before); err != nil { + return 
err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeImport(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeImport", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeKinesisStreamingDestination.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeKinesisStreamingDestination.go new file mode 100644 index 0000000000..dd882cfe73 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeKinesisStreamingDestination.go @@ -0,0 +1,256 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns information about the status of Kinesis streaming. 
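+//
+// A minimal usage sketch, assuming a configured *Client named client and a
+// placeholder table name:
+//
+//	out, err := client.DescribeKinesisStreamingDestination(ctx,
+//		&dynamodb.DescribeKinesisStreamingDestinationInput{TableName: aws.String("Music")})
+//	if err != nil {
+//		return err
+//	}
+//	for _, d := range out.KinesisDataStreamDestinations {
+//		fmt.Println(aws.ToString(d.StreamArn), d.DestinationStatus)
+//	}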
+func (c *Client) DescribeKinesisStreamingDestination(ctx context.Context, params *DescribeKinesisStreamingDestinationInput, optFns ...func(*Options)) (*DescribeKinesisStreamingDestinationOutput, error) { + if params == nil { + params = &DescribeKinesisStreamingDestinationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeKinesisStreamingDestination", params, optFns, c.addOperationDescribeKinesisStreamingDestinationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeKinesisStreamingDestinationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeKinesisStreamingDestinationInput struct { + + // The name of the table being described. You can also provide the Amazon Resource + // Name (ARN) of the table in this parameter. + // + // This member is required. + TableName *string + + noSmithyDocumentSerde +} + +func (in *DescribeKinesisStreamingDestinationInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.TableName + +} + +type DescribeKinesisStreamingDestinationOutput struct { + + // The list of replica structures for the table being described. + KinesisDataStreamDestinations []types.KinesisDataStreamDestination + + // The name of the table being described. + TableName *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeKinesisStreamingDestinationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpDescribeKinesisStreamingDestination{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDescribeKinesisStreamingDestination{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeKinesisStreamingDestination"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDescribeKinesisStreamingDestinationDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = 
addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpDescribeKinesisStreamingDestinationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeKinesisStreamingDestination(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpDescribeKinesisStreamingDestinationDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpDescribeKinesisStreamingDestinationDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpDescribeKinesisStreamingDestinationDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*DescribeKinesisStreamingDestinationInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := 
&DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opDescribeKinesisStreamingDestination(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeKinesisStreamingDestination", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeLimits.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeLimits.go new file mode 100644 index 0000000000..915a2a018a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeLimits.go @@ -0,0 +1,312 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the current provisioned-capacity quotas for your Amazon Web Services +// account in a Region, both for the Region as a whole and for any one DynamoDB +// table that you create there. +// +// When you establish an Amazon Web Services account, the account has initial +// quotas on the maximum read capacity units and write capacity units that you can +// provision across all of your DynamoDB tables in a given Region. Also, there are +// per-table quotas that apply when you create a table there. For more information, +// see [Service, Account, and Table Quotas]page in the Amazon DynamoDB Developer Guide. +// +// Although you can increase these quotas by filing a case at [Amazon Web Services Support Center], obtaining the +// increase is not instantaneous. The DescribeLimits action lets you write code to +// compare the capacity you are currently using to those quotas imposed by your +// account so that you have enough time to apply for an increase before you hit a +// quota. +// +// For example, you could use one of the Amazon Web Services SDKs to do the +// following: +// +// - Call DescribeLimits for a particular Region to obtain your current account +// quotas on provisioned capacity there. +// +// - Create a variable to hold the aggregate read capacity units provisioned for +// all your tables in that Region, and one to hold the aggregate write capacity +// units. Zero them both. +// +// - Call ListTables to obtain a list of all your DynamoDB tables. +// +// - For each table name listed by ListTables , do the following: +// +// - Call DescribeTable with the table name. +// +// - Use the data returned by DescribeTable to add the read capacity units and +// write capacity units provisioned for the table itself to your variables. +// +// - If the table has one or more global secondary indexes (GSIs), loop over +// these GSIs and add their provisioned capacity values to your variables as well. +// +// - Report the account quotas for that Region returned by DescribeLimits , along +// with the total current provisioned capacity levels you have calculated. +// +// This will let you see whether you are getting close to your account-level +// quotas. +// +// The per-table quotas apply only when you are creating a new table. 
They +// restrict the sum of the provisioned capacity of the new table itself and all its +// global secondary indexes. +// +// For existing tables and their GSIs, DynamoDB doesn't let you increase +// provisioned capacity extremely rapidly, but the only quota that applies is that +// the aggregate provisioned capacity over all your tables and GSIs cannot exceed +// either of the per-account quotas. +// +// DescribeLimits should only be called periodically. You can expect throttling +// errors if you call it more than once in a minute. +// +// The DescribeLimits Request element has no content. +// +// [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html +// [Amazon Web Services Support Center]: https://console.aws.amazon.com/support/home#/ +func (c *Client) DescribeLimits(ctx context.Context, params *DescribeLimitsInput, optFns ...func(*Options)) (*DescribeLimitsOutput, error) { + if params == nil { + params = &DescribeLimitsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeLimits", params, optFns, c.addOperationDescribeLimitsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeLimitsOutput) + out.ResultMetadata = metadata + return out, nil +} + +// Represents the input of a DescribeLimits operation. Has no content. +type DescribeLimitsInput struct { + noSmithyDocumentSerde +} + +// Represents the output of a DescribeLimits operation. +type DescribeLimitsOutput struct { + + // The maximum total read capacity units that your account allows you to provision + // across all of your tables in this Region. + AccountMaxReadCapacityUnits *int64 + + // The maximum total write capacity units that your account allows you to + // provision across all of your tables in this Region. + AccountMaxWriteCapacityUnits *int64 + + // The maximum read capacity units that your account allows you to provision for a + // new table that you are creating in this Region, including the read capacity + // units provisioned for its global secondary indexes (GSIs). + TableMaxReadCapacityUnits *int64 + + // The maximum write capacity units that your account allows you to provision for + // a new table that you are creating in this Region, including the write capacity + // units provisioned for its global secondary indexes (GSIs). + TableMaxWriteCapacityUnits *int64 + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeLimitsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpDescribeLimits{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDescribeLimits{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeLimits"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDescribeLimitsDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeLimits(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = 
addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpDescribeLimitsDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpDescribeLimitsDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpDescribeLimitsDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*DescribeLimitsInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opDescribeLimits(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeLimits", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTable.go new file mode 100644 index 0000000000..4a0cd9d29d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTable.go @@ -0,0 +1,631 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "errors" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + smithywaiter "github.com/aws/smithy-go/waiter" + "time" +) + +// Returns information about the table, including the current status of the table, +// when it was created, the primary key schema, and any indexes on the table. +// +// If you issue a DescribeTable request immediately after a CreateTable request, +// DynamoDB might return a ResourceNotFoundException . 
This is because +// DescribeTable uses an eventually consistent query, and the metadata for your +// table might not be available at that moment. Wait for a few seconds, and then +// try the DescribeTable request again. +func (c *Client) DescribeTable(ctx context.Context, params *DescribeTableInput, optFns ...func(*Options)) (*DescribeTableOutput, error) { + if params == nil { + params = &DescribeTableInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeTable", params, optFns, c.addOperationDescribeTableMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeTableOutput) + out.ResultMetadata = metadata + return out, nil +} + +// Represents the input of a DescribeTable operation. +type DescribeTableInput struct { + + // The name of the table to describe. You can also provide the Amazon Resource + // Name (ARN) of the table in this parameter. + // + // This member is required. + TableName *string + + noSmithyDocumentSerde +} + +func (in *DescribeTableInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.TableName + +} + +// Represents the output of a DescribeTable operation. +type DescribeTableOutput struct { + + // The properties of the table. + Table *types.TableDescription + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeTableMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpDescribeTable{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDescribeTable{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeTable"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDescribeTableDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + 
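+	// Input validation is registered ahead of serialization, so a missing
+	// required member (TableName) fails fast before any request is built.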
if err = addOpDescribeTableValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeTable(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// TableExistsWaiterOptions are waiter options for TableExistsWaiter +type TableExistsWaiterOptions struct { + + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + // + // Passing options here is functionally equivalent to passing values to this + // config's ClientOptions field that extend the inner client's APIOptions directly. + APIOptions []func(*middleware.Stack) error + + // Functional options to be passed to all operations invoked by this client. + // + // Function values that modify the inner APIOptions are applied after the waiter + // config's own APIOptions modifiers. + ClientOptions []func(*Options) + + // MinDelay is the minimum amount of time to delay between retries. If unset, + // TableExistsWaiter will use default minimum delay of 20 seconds. Note that + // MinDelay must resolve to a value lesser than or equal to the MaxDelay. + MinDelay time.Duration + + // MaxDelay is the maximum amount of time to delay between retries. If unset or + // set to zero, TableExistsWaiter will use default max delay of 120 seconds. Note + // that MaxDelay must resolve to value greater than or equal to the MinDelay. + MaxDelay time.Duration + + // LogWaitAttempts is used to enable logging for waiter retry attempts + LogWaitAttempts bool + + // Retryable is function that can be used to override the service defined + // waiter-behavior based on operation output, or returned error. 
This function is + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. + Retryable func(context.Context, *DescribeTableInput, *DescribeTableOutput, error) (bool, error) +} + +// TableExistsWaiter defines the waiters for TableExists +type TableExistsWaiter struct { + client DescribeTableAPIClient + + options TableExistsWaiterOptions +} + +// NewTableExistsWaiter constructs a TableExistsWaiter. +func NewTableExistsWaiter(client DescribeTableAPIClient, optFns ...func(*TableExistsWaiterOptions)) *TableExistsWaiter { + options := TableExistsWaiterOptions{} + options.MinDelay = 20 * time.Second + options.MaxDelay = 120 * time.Second + options.Retryable = tableExistsStateRetryable + + for _, fn := range optFns { + fn(&options) + } + return &TableExistsWaiter{ + client: client, + options: options, + } +} + +// Wait calls the waiter function for TableExists waiter. The maxWaitDur is the +// maximum wait duration the waiter will wait. The maxWaitDur is required and must +// be greater than zero. +func (w *TableExistsWaiter) Wait(ctx context.Context, params *DescribeTableInput, maxWaitDur time.Duration, optFns ...func(*TableExistsWaiterOptions)) error { + _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) + return err +} + +// WaitForOutput calls the waiter function for TableExists waiter and returns the +// output of the successful operation. The maxWaitDur is the maximum wait duration +// the waiter will wait. The maxWaitDur is required and must be greater than zero. +func (w *TableExistsWaiter) WaitForOutput(ctx context.Context, params *DescribeTableInput, maxWaitDur time.Duration, optFns ...func(*TableExistsWaiterOptions)) (*DescribeTableOutput, error) { + if maxWaitDur <= 0 { + return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") + } + + options := w.options + for _, fn := range optFns { + fn(&options) + } + + if options.MaxDelay <= 0 { + options.MaxDelay = 120 * time.Second + } + + if options.MinDelay > options.MaxDelay { + return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) + } + + ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) + defer cancelFn() + + logger := smithywaiter.Logger{} + remainingTime := maxWaitDur + + var attempt int64 + for { + + attempt++ + apiOptions := options.APIOptions + start := time.Now() + + if options.LogWaitAttempts { + logger.Attempt = attempt + apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) + apiOptions = append(apiOptions, logger.AddLogger) + } + + out, err := w.client.DescribeTable(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } + o.APIOptions = append(o.APIOptions, apiOptions...) 
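+				// Base options (the waiter user-agent marker) are applied first,
+				// then any caller-supplied ClientOptions, so per-call overrides win.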
+ for _, opt := range baseOpts { + opt(o) + } + for _, opt := range options.ClientOptions { + opt(o) + } + }) + + retryable, err := options.Retryable(ctx, params, out, err) + if err != nil { + return nil, err + } + if !retryable { + return out, nil + } + + remainingTime -= time.Since(start) + if remainingTime < options.MinDelay || remainingTime <= 0 { + break + } + + // compute exponential backoff between waiter retries + delay, err := smithywaiter.ComputeDelay( + attempt, options.MinDelay, options.MaxDelay, remainingTime, + ) + if err != nil { + return nil, fmt.Errorf("error computing waiter delay, %w", err) + } + + remainingTime -= delay + // sleep for the delay amount before invoking a request + if err := smithytime.SleepWithContext(ctx, delay); err != nil { + return nil, fmt.Errorf("request cancelled while waiting, %w", err) + } + } + return nil, fmt.Errorf("exceeded max wait time for TableExists waiter") +} + +func tableExistsStateRetryable(ctx context.Context, input *DescribeTableInput, output *DescribeTableOutput, err error) (bool, error) { + + if err == nil { + v1 := output.Table + var v2 types.TableStatus + if v1 != nil { + v3 := v1.TableStatus + v2 = v3 + } + expectedValue := "ACTIVE" + var pathValue string + pathValue = string(v2) + if pathValue == expectedValue { + return false, nil + } + } + + if err != nil { + var errorType *types.ResourceNotFoundException + if errors.As(err, &errorType) { + return true, nil + } + } + + if err != nil { + return false, err + } + return true, nil +} + +// TableNotExistsWaiterOptions are waiter options for TableNotExistsWaiter +type TableNotExistsWaiterOptions struct { + + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + // + // Passing options here is functionally equivalent to passing values to this + // config's ClientOptions field that extend the inner client's APIOptions directly. + APIOptions []func(*middleware.Stack) error + + // Functional options to be passed to all operations invoked by this client. + // + // Function values that modify the inner APIOptions are applied after the waiter + // config's own APIOptions modifiers. + ClientOptions []func(*Options) + + // MinDelay is the minimum amount of time to delay between retries. If unset, + // TableNotExistsWaiter will use default minimum delay of 20 seconds. Note that + // MinDelay must resolve to a value lesser than or equal to the MaxDelay. + MinDelay time.Duration + + // MaxDelay is the maximum amount of time to delay between retries. If unset or + // set to zero, TableNotExistsWaiter will use default max delay of 120 seconds. + // Note that MaxDelay must resolve to value greater than or equal to the MinDelay. + MaxDelay time.Duration + + // LogWaitAttempts is used to enable logging for waiter retry attempts + LogWaitAttempts bool + + // Retryable is function that can be used to override the service defined + // waiter-behavior based on operation output, or returned error. This function is + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. 
+ // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. + Retryable func(context.Context, *DescribeTableInput, *DescribeTableOutput, error) (bool, error) +} + +// TableNotExistsWaiter defines the waiters for TableNotExists +type TableNotExistsWaiter struct { + client DescribeTableAPIClient + + options TableNotExistsWaiterOptions +} + +// NewTableNotExistsWaiter constructs a TableNotExistsWaiter. +func NewTableNotExistsWaiter(client DescribeTableAPIClient, optFns ...func(*TableNotExistsWaiterOptions)) *TableNotExistsWaiter { + options := TableNotExistsWaiterOptions{} + options.MinDelay = 20 * time.Second + options.MaxDelay = 120 * time.Second + options.Retryable = tableNotExistsStateRetryable + + for _, fn := range optFns { + fn(&options) + } + return &TableNotExistsWaiter{ + client: client, + options: options, + } +} + +// Wait calls the waiter function for TableNotExists waiter. The maxWaitDur is the +// maximum wait duration the waiter will wait. The maxWaitDur is required and must +// be greater than zero. +func (w *TableNotExistsWaiter) Wait(ctx context.Context, params *DescribeTableInput, maxWaitDur time.Duration, optFns ...func(*TableNotExistsWaiterOptions)) error { + _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) + return err +} + +// WaitForOutput calls the waiter function for TableNotExists waiter and returns +// the output of the successful operation. The maxWaitDur is the maximum wait +// duration the waiter will wait. The maxWaitDur is required and must be greater +// than zero. +func (w *TableNotExistsWaiter) WaitForOutput(ctx context.Context, params *DescribeTableInput, maxWaitDur time.Duration, optFns ...func(*TableNotExistsWaiterOptions)) (*DescribeTableOutput, error) { + if maxWaitDur <= 0 { + return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") + } + + options := w.options + for _, fn := range optFns { + fn(&options) + } + + if options.MaxDelay <= 0 { + options.MaxDelay = 120 * time.Second + } + + if options.MinDelay > options.MaxDelay { + return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) + } + + ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) + defer cancelFn() + + logger := smithywaiter.Logger{} + remainingTime := maxWaitDur + + var attempt int64 + for { + + attempt++ + apiOptions := options.APIOptions + start := time.Now() + + if options.LogWaitAttempts { + logger.Attempt = attempt + apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) + apiOptions = append(apiOptions, logger.AddLogger) + } + + out, err := w.client.DescribeTable(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } + o.APIOptions = append(o.APIOptions, apiOptions...) 
+ for _, opt := range baseOpts { + opt(o) + } + for _, opt := range options.ClientOptions { + opt(o) + } + }) + + retryable, err := options.Retryable(ctx, params, out, err) + if err != nil { + return nil, err + } + if !retryable { + return out, nil + } + + remainingTime -= time.Since(start) + if remainingTime < options.MinDelay || remainingTime <= 0 { + break + } + + // compute exponential backoff between waiter retries + delay, err := smithywaiter.ComputeDelay( + attempt, options.MinDelay, options.MaxDelay, remainingTime, + ) + if err != nil { + return nil, fmt.Errorf("error computing waiter delay, %w", err) + } + + remainingTime -= delay + // sleep for the delay amount before invoking a request + if err := smithytime.SleepWithContext(ctx, delay); err != nil { + return nil, fmt.Errorf("request cancelled while waiting, %w", err) + } + } + return nil, fmt.Errorf("exceeded max wait time for TableNotExists waiter") +} + +func tableNotExistsStateRetryable(ctx context.Context, input *DescribeTableInput, output *DescribeTableOutput, err error) (bool, error) { + + if err != nil { + var errorType *types.ResourceNotFoundException + if errors.As(err, &errorType) { + return false, nil + } + } + + if err != nil { + return false, err + } + return true, nil +} + +func addOpDescribeTableDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpDescribeTableDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpDescribeTableDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*DescribeTableInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +// DescribeTableAPIClient is a client that implements the DescribeTable operation. 
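+// The waiters above accept any implementation of this interface, so wrapped or
+// mocked clients can be polled just like the concrete *Client. A minimal
+// sketch, assuming a configured client and a placeholder table name:
+//
+//	waiter := dynamodb.NewTableExistsWaiter(client)
+//	err := waiter.Wait(ctx,
+//		&dynamodb.DescribeTableInput{TableName: aws.String("Music")},
+//		5*time.Minute)
+//	if err != nil {
+//		return fmt.Errorf("table never became ACTIVE: %w", err)
+//	}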
+type DescribeTableAPIClient interface { + DescribeTable(context.Context, *DescribeTableInput, ...func(*Options)) (*DescribeTableOutput, error) +} + +var _ DescribeTableAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opDescribeTable(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeTable", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTableReplicaAutoScaling.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTableReplicaAutoScaling.go new file mode 100644 index 0000000000..f8e2f77b78 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTableReplicaAutoScaling.go @@ -0,0 +1,206 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Describes auto scaling settings across replicas of the global table at once. +func (c *Client) DescribeTableReplicaAutoScaling(ctx context.Context, params *DescribeTableReplicaAutoScalingInput, optFns ...func(*Options)) (*DescribeTableReplicaAutoScalingOutput, error) { + if params == nil { + params = &DescribeTableReplicaAutoScalingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeTableReplicaAutoScaling", params, optFns, c.addOperationDescribeTableReplicaAutoScalingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeTableReplicaAutoScalingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeTableReplicaAutoScalingInput struct { + + // The name of the table. You can also provide the Amazon Resource Name (ARN) of + // the table in this parameter. + // + // This member is required. + TableName *string + + noSmithyDocumentSerde +} + +func (in *DescribeTableReplicaAutoScalingInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.TableName + +} + +type DescribeTableReplicaAutoScalingOutput struct { + + // Represents the auto scaling properties of the table. + TableAutoScalingDescription *types.TableAutoScalingDescription + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeTableReplicaAutoScalingMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpDescribeTableReplicaAutoScaling{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDescribeTableReplicaAutoScaling{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeTableReplicaAutoScaling"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpDescribeTableReplicaAutoScalingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeTableReplicaAutoScaling(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = 
addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeTableReplicaAutoScaling(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeTableReplicaAutoScaling", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTimeToLive.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTimeToLive.go new file mode 100644 index 0000000000..5ea9ab1eea --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTimeToLive.go @@ -0,0 +1,253 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gives a description of the Time to Live (TTL) status on the specified table. +func (c *Client) DescribeTimeToLive(ctx context.Context, params *DescribeTimeToLiveInput, optFns ...func(*Options)) (*DescribeTimeToLiveOutput, error) { + if params == nil { + params = &DescribeTimeToLiveInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeTimeToLive", params, optFns, c.addOperationDescribeTimeToLiveMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeTimeToLiveOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeTimeToLiveInput struct { + + // The name of the table to be described. You can also provide the Amazon Resource + // Name (ARN) of the table in this parameter. + // + // This member is required. + TableName *string + + noSmithyDocumentSerde +} + +func (in *DescribeTimeToLiveInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.TableName + +} + +type DescribeTimeToLiveOutput struct { + + // + TimeToLiveDescription *types.TimeToLiveDescription + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeTimeToLiveMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpDescribeTimeToLive{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDescribeTimeToLive{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeTimeToLive"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDescribeTimeToLiveDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpDescribeTimeToLiveValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeTimeToLive(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != 
nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpDescribeTimeToLiveDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpDescribeTimeToLiveDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpDescribeTimeToLiveDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*DescribeTimeToLiveInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opDescribeTimeToLive(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeTimeToLive", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DisableKinesisStreamingDestination.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DisableKinesisStreamingDestination.go new file mode 100644 index 0000000000..5df8f9ea0d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DisableKinesisStreamingDestination.go @@ -0,0 +1,271 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Stops replication from the DynamoDB table to the Kinesis data stream. This is +// done without deleting either of the resources. 
+func (c *Client) DisableKinesisStreamingDestination(ctx context.Context, params *DisableKinesisStreamingDestinationInput, optFns ...func(*Options)) (*DisableKinesisStreamingDestinationOutput, error) { + if params == nil { + params = &DisableKinesisStreamingDestinationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DisableKinesisStreamingDestination", params, optFns, c.addOperationDisableKinesisStreamingDestinationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DisableKinesisStreamingDestinationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DisableKinesisStreamingDestinationInput struct { + + // The ARN for a Kinesis data stream. + // + // This member is required. + StreamArn *string + + // The name of the DynamoDB table. You can also provide the Amazon Resource Name + // (ARN) of the table in this parameter. + // + // This member is required. + TableName *string + + // The source for the Kinesis streaming information that is being enabled. + EnableKinesisStreamingConfiguration *types.EnableKinesisStreamingConfiguration + + noSmithyDocumentSerde +} + +func (in *DisableKinesisStreamingDestinationInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.TableName + +} + +type DisableKinesisStreamingDestinationOutput struct { + + // The current status of the replication. + DestinationStatus types.DestinationStatus + + // The destination for the Kinesis streaming information that is being enabled. + EnableKinesisStreamingConfiguration *types.EnableKinesisStreamingConfiguration + + // The ARN for the specific Kinesis data stream. + StreamArn *string + + // The name of the table being modified. + TableName *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDisableKinesisStreamingDestinationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpDisableKinesisStreamingDestination{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDisableKinesisStreamingDestination{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DisableKinesisStreamingDestination"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil 
{ + return err + } + if err = addOpDisableKinesisStreamingDestinationDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpDisableKinesisStreamingDestinationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDisableKinesisStreamingDestination(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpDisableKinesisStreamingDestinationDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpDisableKinesisStreamingDestinationDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpDisableKinesisStreamingDestinationDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := 
input.(*DisableKinesisStreamingDestinationInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opDisableKinesisStreamingDestination(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DisableKinesisStreamingDestination", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_EnableKinesisStreamingDestination.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_EnableKinesisStreamingDestination.go new file mode 100644 index 0000000000..c7b047c7f2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_EnableKinesisStreamingDestination.go @@ -0,0 +1,273 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Starts table data replication to the specified Kinesis data stream at a +// timestamp chosen during the enable workflow. If this operation doesn't return +// results immediately, use DescribeKinesisStreamingDestination to check if +// streaming to the Kinesis data stream is ACTIVE. +func (c *Client) EnableKinesisStreamingDestination(ctx context.Context, params *EnableKinesisStreamingDestinationInput, optFns ...func(*Options)) (*EnableKinesisStreamingDestinationOutput, error) { + if params == nil { + params = &EnableKinesisStreamingDestinationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "EnableKinesisStreamingDestination", params, optFns, c.addOperationEnableKinesisStreamingDestinationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*EnableKinesisStreamingDestinationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type EnableKinesisStreamingDestinationInput struct { + + // The ARN for a Kinesis data stream. + // + // This member is required. + StreamArn *string + + // The name of the DynamoDB table. You can also provide the Amazon Resource Name + // (ARN) of the table in this parameter. + // + // This member is required. + TableName *string + + // The source for the Kinesis streaming information that is being enabled. + EnableKinesisStreamingConfiguration *types.EnableKinesisStreamingConfiguration + + noSmithyDocumentSerde +} + +func (in *EnableKinesisStreamingDestinationInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.TableName + +} + +type EnableKinesisStreamingDestinationOutput struct { + + // The current status of the replication. 
+ DestinationStatus types.DestinationStatus + + // The destination for the Kinesis streaming information that is being enabled. + EnableKinesisStreamingConfiguration *types.EnableKinesisStreamingConfiguration + + // The ARN for the specific Kinesis data stream. + StreamArn *string + + // The name of the table being modified. + TableName *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationEnableKinesisStreamingDestinationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpEnableKinesisStreamingDestination{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpEnableKinesisStreamingDestination{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "EnableKinesisStreamingDestination"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpEnableKinesisStreamingDestinationDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpEnableKinesisStreamingDestinationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opEnableKinesisStreamingDestination(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil 
{ + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpEnableKinesisStreamingDestinationDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpEnableKinesisStreamingDestinationDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpEnableKinesisStreamingDestinationDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*EnableKinesisStreamingDestinationInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opEnableKinesisStreamingDestination(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "EnableKinesisStreamingDestination", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExecuteStatement.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExecuteStatement.go new file mode 100644 index 0000000000..655372e143 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExecuteStatement.go @@ -0,0 +1,283 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation allows you to perform reads and singleton writes on data stored +// in DynamoDB, using PartiQL. +// +// For PartiQL reads ( SELECT statement), if the total number of processed items +// exceeds the maximum dataset size limit of 1 MB, the read stops and results are +// returned to the user as a LastEvaluatedKey value to continue the read in a +// subsequent operation. If the filter criteria in WHERE clause does not match any +// data, the read will return an empty result set. +// +// A single SELECT statement response can return up to the maximum number of items +// (if using the Limit parameter) or a maximum of 1 MB of data (and then apply any +// filtering to the results using WHERE clause). If LastEvaluatedKey is present in +// the response, you need to paginate the result set. If NextToken is present, you +// need to paginate the result set and include NextToken . +func (c *Client) ExecuteStatement(ctx context.Context, params *ExecuteStatementInput, optFns ...func(*Options)) (*ExecuteStatementOutput, error) { + if params == nil { + params = &ExecuteStatementInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ExecuteStatement", params, optFns, c.addOperationExecuteStatementMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ExecuteStatementOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ExecuteStatementInput struct { + + // The PartiQL statement representing the operation to run. + // + // This member is required. + Statement *string + + // The consistency of a read operation. If set to true , then a strongly consistent + // read is used; otherwise, an eventually consistent read is used. + ConsistentRead *bool + + // The maximum number of items to evaluate (not necessarily the number of matching + // items). If DynamoDB processes the number of items up to the limit while + // processing the results, it stops the operation and returns the matching values + // up to that point, along with a key in LastEvaluatedKey to apply in a subsequent + // operation so you can pick up where you left off. Also, if the processed dataset + // size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation and + // returns the matching values up to the limit, and a key in LastEvaluatedKey to + // apply in a subsequent operation to continue the operation. + Limit *int32 + + // Set this value to get remaining results, if NextToken was returned in the + // statement response. + NextToken *string + + // The parameters for the PartiQL statement, if any. + Parameters []types.AttributeValue + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response: + // + // - INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // + // - TOTAL - The response includes only the aggregate ConsumedCapacity for the + // operation. 
+ // + // - NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity types.ReturnConsumedCapacity + + // An optional parameter that returns the item attributes for an ExecuteStatement + // operation that failed a condition check. + // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. + ReturnValuesOnConditionCheckFailure types.ReturnValuesOnConditionCheckFailure + + noSmithyDocumentSerde +} + +type ExecuteStatementOutput struct { + + // The capacity units consumed by an operation. The data returned includes the + // total provisioned throughput consumed, along with statistics for the table and + // any indexes involved in the operation. ConsumedCapacity is only returned if the + // request asked for it. For more information, see [Provisioned capacity mode]in the Amazon DynamoDB + // Developer Guide. + // + // [Provisioned capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html + ConsumedCapacity *types.ConsumedCapacity + + // If a read operation was used, this property will contain the result of the read + // operation; a map of attribute names and their values. For the write operations + // this value will be empty. + Items []map[string]types.AttributeValue + + // The primary key of the item where the operation stopped, inclusive of the + // previous result set. Use this value to start a new operation, excluding this + // value in the new request. If LastEvaluatedKey is empty, then the "last page" of + // results has been processed and there is no more data to be retrieved. If + // LastEvaluatedKey is not empty, it does not necessarily mean that there is more + // data in the result set. The only way to know when you have reached the end of + // the result set is when LastEvaluatedKey is empty. + LastEvaluatedKey map[string]types.AttributeValue + + // If the response of a read request exceeds the response payload limit DynamoDB + // will set this value in the response. If set, you can use that this value in the + // subsequent request to get the remaining results. + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationExecuteStatementMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpExecuteStatement{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpExecuteStatement{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ExecuteStatement"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpExecuteStatementValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opExecuteStatement(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = 
addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opExecuteStatement(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ExecuteStatement", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExecuteTransaction.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExecuteTransaction.go new file mode 100644 index 0000000000..85c587bc3b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExecuteTransaction.go @@ -0,0 +1,258 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This operation allows you to perform transactional reads or writes on data +// stored in DynamoDB, using PartiQL. +// +// The entire transaction must consist of either read statements or write +// statements, you cannot mix both in one transaction. The EXISTS function is an +// exception and can be used to check the condition of specific attributes of the +// item in a similar manner to ConditionCheck in the [TransactWriteItems] API. +// +// [TransactWriteItems]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/transaction-apis.html#transaction-apis-txwriteitems +func (c *Client) ExecuteTransaction(ctx context.Context, params *ExecuteTransactionInput, optFns ...func(*Options)) (*ExecuteTransactionOutput, error) { + if params == nil { + params = &ExecuteTransactionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ExecuteTransaction", params, optFns, c.addOperationExecuteTransactionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ExecuteTransactionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ExecuteTransactionInput struct { + + // The list of PartiQL statements representing the transaction to run. + // + // This member is required. + TransactStatements []types.ParameterizedStatement + + // Set this value to get remaining results, if NextToken was returned in the + // statement response. + ClientRequestToken *string + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response. For more information, see [TransactGetItems]and [TransactWriteItems]. + // + // [TransactWriteItems]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TransactWriteItems.html + // [TransactGetItems]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TransactGetItems.html + ReturnConsumedCapacity types.ReturnConsumedCapacity + + noSmithyDocumentSerde +} + +type ExecuteTransactionOutput struct { + + // The capacity units consumed by the entire operation. 
The values of the list are + // ordered according to the ordering of the statements. + ConsumedCapacity []types.ConsumedCapacity + + // The response to a PartiQL transaction. + Responses []types.ItemResponse + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationExecuteTransactionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpExecuteTransaction{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpExecuteTransaction{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ExecuteTransaction"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addIdempotencyToken_opExecuteTransactionMiddleware(stack, options); err != nil { + return err + } + if err = addOpExecuteTransactionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opExecuteTransaction(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { 
+ return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +type idempotencyToken_initializeOpExecuteTransaction struct { + tokenProvider IdempotencyTokenProvider +} + +func (*idempotencyToken_initializeOpExecuteTransaction) ID() string { + return "OperationIdempotencyTokenAutoFill" +} + +func (m *idempotencyToken_initializeOpExecuteTransaction) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.tokenProvider == nil { + return next.HandleInitialize(ctx, in) + } + + input, ok := in.Parameters.(*ExecuteTransactionInput) + if !ok { + return out, metadata, fmt.Errorf("expected middleware input to be of type *ExecuteTransactionInput ") + } + + if input.ClientRequestToken == nil { + t, err := m.tokenProvider.GetIdempotencyToken() + if err != nil { + return out, metadata, err + } + input.ClientRequestToken = &t + } + return next.HandleInitialize(ctx, in) +} +func addIdempotencyToken_opExecuteTransactionMiddleware(stack *middleware.Stack, cfg Options) error { + return stack.Initialize.Add(&idempotencyToken_initializeOpExecuteTransaction{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before) +} + +func newServiceMetadataMiddleware_opExecuteTransaction(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ExecuteTransaction", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExportTableToPointInTime.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExportTableToPointInTime.go new file mode 100644 index 0000000000..491a448232 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExportTableToPointInTime.go @@ -0,0 +1,304 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Exports table data to an S3 bucket. The table must have point in time recovery +// enabled, and you can export data from any time within the point in time recovery +// window. 
+func (c *Client) ExportTableToPointInTime(ctx context.Context, params *ExportTableToPointInTimeInput, optFns ...func(*Options)) (*ExportTableToPointInTimeOutput, error) { + if params == nil { + params = &ExportTableToPointInTimeInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ExportTableToPointInTime", params, optFns, c.addOperationExportTableToPointInTimeMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ExportTableToPointInTimeOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ExportTableToPointInTimeInput struct { + + // The name of the Amazon S3 bucket to export the snapshot to. + // + // This member is required. + S3Bucket *string + + // The Amazon Resource Name (ARN) associated with the table to export. + // + // This member is required. + TableArn *string + + // Providing a ClientToken makes the call to ExportTableToPointInTimeInput + // idempotent, meaning that multiple identical calls have the same effect as one + // single call. + // + // A client token is valid for 8 hours after the first request that uses it is + // completed. After 8 hours, any request with the same client token is treated as a + // new request. Do not resubmit the same request with the same client token for + // more than 8 hours, or the result might not be idempotent. + // + // If you submit a request with the same client token but a change in other + // parameters within the 8-hour idempotency window, DynamoDB returns an + // ImportConflictException . + ClientToken *string + + // The format for the exported data. Valid values for ExportFormat are + // DYNAMODB_JSON or ION . + ExportFormat types.ExportFormat + + // Time in the past from which to export table data, counted in seconds from the + // start of the Unix epoch. The table export will be a snapshot of the table's + // state at this point in time. + ExportTime *time.Time + + // Choice of whether to execute as a full export or incremental export. Valid + // values are FULL_EXPORT or INCREMENTAL_EXPORT. The default value is FULL_EXPORT. + // If INCREMENTAL_EXPORT is provided, the IncrementalExportSpecification must also + // be used. + ExportType types.ExportType + + // Optional object containing the parameters specific to an incremental export. + IncrementalExportSpecification *types.IncrementalExportSpecification + + // The ID of the Amazon Web Services account that owns the bucket the export will + // be stored in. + // + // S3BucketOwner is a required parameter when exporting to a S3 bucket in another + // account. + S3BucketOwner *string + + // The Amazon S3 bucket prefix to use as the file name and path of the exported + // snapshot. + S3Prefix *string + + // Type of encryption used on the bucket where export data will be stored. Valid + // values for S3SseAlgorithm are: + // + // - AES256 - server-side encryption with Amazon S3 managed keys + // + // - KMS - server-side encryption with KMS managed keys + S3SseAlgorithm types.S3SseAlgorithm + + // The ID of the KMS managed key used to encrypt the S3 bucket where export data + // will be stored (if applicable). + S3SseKmsKeyId *string + + noSmithyDocumentSerde +} + +func (in *ExportTableToPointInTimeInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.TableArn + +} + +type ExportTableToPointInTimeOutput struct { + + // Contains a description of the table export. + ExportDescription *types.ExportDescription + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationExportTableToPointInTimeMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpExportTableToPointInTime{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpExportTableToPointInTime{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ExportTableToPointInTime"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addIdempotencyToken_opExportTableToPointInTimeMiddleware(stack, options); err != nil { + return err + } + if err = addOpExportTableToPointInTimeValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opExportTableToPointInTime(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = 
addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +type idempotencyToken_initializeOpExportTableToPointInTime struct { + tokenProvider IdempotencyTokenProvider +} + +func (*idempotencyToken_initializeOpExportTableToPointInTime) ID() string { + return "OperationIdempotencyTokenAutoFill" +} + +func (m *idempotencyToken_initializeOpExportTableToPointInTime) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.tokenProvider == nil { + return next.HandleInitialize(ctx, in) + } + + input, ok := in.Parameters.(*ExportTableToPointInTimeInput) + if !ok { + return out, metadata, fmt.Errorf("expected middleware input to be of type *ExportTableToPointInTimeInput ") + } + + if input.ClientToken == nil { + t, err := m.tokenProvider.GetIdempotencyToken() + if err != nil { + return out, metadata, err + } + input.ClientToken = &t + } + return next.HandleInitialize(ctx, in) +} +func addIdempotencyToken_opExportTableToPointInTimeMiddleware(stack *middleware.Stack, cfg Options) error { + return stack.Initialize.Add(&idempotencyToken_initializeOpExportTableToPointInTime{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before) +} + +func newServiceMetadataMiddleware_opExportTableToPointInTime(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ExportTableToPointInTime", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetItem.go new file mode 100644 index 0000000000..5a8e1f6bed --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetItem.go @@ -0,0 +1,360 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// The GetItem operation returns a set of attributes for the item with the given +// primary key. If there is no matching item, GetItem does not return any data and +// there will be no Item element in the response. +// +// GetItem provides an eventually consistent read by default. If your application +// requires a strongly consistent read, set ConsistentRead to true . Although a +// strongly consistent read might take more time than an eventually consistent +// read, it always returns the last updated value. 
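(Editor's note, not part of the vendored patch.) A minimal usage sketch of the operation documented above, assuming a configured *dynamodb.Client*; the table name, key attributes, and the #Y alias are hypothetical:

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

// getMovie issues a strongly consistent GetItem and projects two attributes.
// "Year" is a DynamoDB reserved word, so it is dereferenced through the #Y
// placeholder in ExpressionAttributeNames, as the doc comment above describes.
func getMovie(ctx context.Context, client *dynamodb.Client) (map[string]types.AttributeValue, error) {
	out, err := client.GetItem(ctx, &dynamodb.GetItemInput{
		TableName: aws.String("Movies"), // placeholder table
		Key: map[string]types.AttributeValue{
			"Title": &types.AttributeValueMemberS{Value: "Heat"},
			"Year":  &types.AttributeValueMemberN{Value: "1995"},
		},
		ConsistentRead:           aws.Bool(true),
		ProjectionExpression:     aws.String("Plot, #Y"),
		ExpressionAttributeNames: map[string]string{"#Y": "Year"},
	})
	if err != nil {
		return nil, err
	}
	return out.Item, nil
}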
+func (c *Client) GetItem(ctx context.Context, params *GetItemInput, optFns ...func(*Options)) (*GetItemOutput, error) { + if params == nil { + params = &GetItemInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetItem", params, optFns, c.addOperationGetItemMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetItemOutput) + out.ResultMetadata = metadata + return out, nil +} + +// Represents the input of a GetItem operation. +type GetItemInput struct { + + // A map of attribute names to AttributeValue objects, representing the primary + // key of the item to retrieve. + // + // For the primary key, you must provide all of the attributes. For example, with + // a simple primary key, you only need to provide a value for the partition key. + // For a composite primary key, you must provide values for both the partition key + // and the sort key. + // + // This member is required. + Key map[string]types.AttributeValue + + // The name of the table containing the requested item. You can also provide the + // Amazon Resource Name (ARN) of the table in this parameter. + // + // This member is required. + TableName *string + + // This is a legacy parameter. Use ProjectionExpression instead. For more + // information, see [AttributesToGet]in the Amazon DynamoDB Developer Guide. + // + // [AttributesToGet]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html + AttributesToGet []string + + // Determines the read consistency model: If set to true , then the operation uses + // strongly consistent reads; otherwise, the operation uses eventually consistent + // reads. + ConsistentRead *bool + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames : + // + // - To access an attribute whose name conflicts with a DynamoDB reserved word. + // + // - To create a placeholder for repeating occurrences of an attribute name in + // an expression. + // + // - To prevent special characters in an attribute name from being + // misinterpreted in an expression. + // + // Use the # character in an expression to dereference an attribute name. For + // example, consider the following attribute name: + // + // - Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be used + // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in the + // Amazon DynamoDB Developer Guide). To work around this, you could specify the + // following for ExpressionAttributeNames : + // + // - {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // - #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see [Specifying Item Attributes] in the Amazon DynamoDB + // Developer Guide. + // + // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html + // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html + ExpressionAttributeNames map[string]string + + // A string that identifies one or more attributes to retrieve from the table. + // These attributes can include scalars, sets, or elements of a JSON document. 
The + // attributes in the expression must be separated by commas. + // + // If no attribute names are specified, then all attributes are returned. If any + // of the requested attributes are not found, they do not appear in the result. + // + // For more information, see [Specifying Item Attributes] in the Amazon DynamoDB Developer Guide. + // + // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html + ProjectionExpression *string + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response: + // + // - INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // + // - TOTAL - The response includes only the aggregate ConsumedCapacity for the + // operation. + // + // - NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity types.ReturnConsumedCapacity + + noSmithyDocumentSerde +} + +func (in *GetItemInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.TableName + +} + +// Represents the output of a GetItem operation. +type GetItemOutput struct { + + // The capacity units consumed by the GetItem operation. The data returned + // includes the total provisioned throughput consumed, along with statistics for + // the table and any indexes involved in the operation. ConsumedCapacity is only + // returned if the ReturnConsumedCapacity parameter was specified. For more + // information, see [Capacity unit consumption for read operations]in the Amazon DynamoDB Developer Guide. + // + // [Capacity unit consumption for read operations]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#read-operation-consumption + ConsumedCapacity *types.ConsumedCapacity + + // A map of attribute names to AttributeValue objects, as specified by + // ProjectionExpression . + Item map[string]types.AttributeValue + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetItemMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpGetItem{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpGetItem{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetItem"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetItemDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpGetItemValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetItem(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err 
!= nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpGetItemDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpGetItemDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpGetItemDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*GetItemInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opGetItem(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetItem", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetResourcePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetResourcePolicy.go new file mode 100644 index 0000000000..31bae3f42a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetResourcePolicy.go @@ -0,0 +1,286 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the resource-based policy document attached to the resource, which can +// be a table or stream, in JSON format. +// +// GetResourcePolicy follows an [eventually consistent] model. 
The following list describes the outcomes +// when you issue the GetResourcePolicy request immediately after issuing another +// request: +// +// - If you issue a GetResourcePolicy request immediately after a +// PutResourcePolicy request, DynamoDB might return a PolicyNotFoundException . +// +// - If you issue a GetResourcePolicy request immediately after a +// DeleteResourcePolicy request, DynamoDB might return the policy that was +// present before the deletion request. +// +// - If you issue a GetResourcePolicy request immediately after a CreateTable +// request, which includes a resource-based policy, DynamoDB might return a +// ResourceNotFoundException or a PolicyNotFoundException . +// +// Because GetResourcePolicy uses an eventually consistent query, the metadata for +// your policy or table might not be available at that moment. Wait for a few +// seconds, and then retry the GetResourcePolicy request. +// +// After a GetResourcePolicy request returns a policy created using the +// PutResourcePolicy request, the policy will be applied in the authorization of +// requests to the resource. Because this process is eventually consistent, it will +// take some time to apply the policy to all requests to a resource. Policies that +// you attach while creating a table using the CreateTable request will always be +// applied to all requests for that table. +// +// [eventually consistent]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html +func (c *Client) GetResourcePolicy(ctx context.Context, params *GetResourcePolicyInput, optFns ...func(*Options)) (*GetResourcePolicyOutput, error) { + if params == nil { + params = &GetResourcePolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetResourcePolicy", params, optFns, c.addOperationGetResourcePolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetResourcePolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetResourcePolicyInput struct { + + // The Amazon Resource Name (ARN) of the DynamoDB resource to which the policy is + // attached. The resources you can specify include tables and streams. + // + // This member is required. + ResourceArn *string + + noSmithyDocumentSerde +} + +func (in *GetResourcePolicyInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.ResourceArn + +} + +type GetResourcePolicyOutput struct { + + // The resource-based policy document attached to the resource, which can be a + // table or stream, in JSON format. + Policy *string + + // A unique string that represents the revision ID of the policy. If you're + // comparing revision IDs, make sure to always use string comparison logic. + RevisionId *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetResourcePolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpGetResourcePolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpGetResourcePolicy{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetResourcePolicy"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetResourcePolicyDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpGetResourcePolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetResourcePolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + 
return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpGetResourcePolicyDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpGetResourcePolicyDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpGetResourcePolicyDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*GetResourcePolicyInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opGetResourcePolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetResourcePolicy", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ImportTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ImportTable.go new file mode 100644 index 0000000000..ab97739f8b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ImportTable.go @@ -0,0 +1,282 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Imports table data from an S3 bucket. 
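(Editor's note, not part of the vendored patch.) A hedged sketch of starting an import with the input shape defined below; the bucket, prefix, and table parameters are placeholders, and ClientToken is left unset so the idempotency-token middleware later in this file fills it in:

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

// startImport begins a CSV import into a new single-key, on-demand table.
func startImport(ctx context.Context, client *dynamodb.Client) (*dynamodb.ImportTableOutput, error) {
	return client.ImportTable(ctx, &dynamodb.ImportTableInput{
		InputFormat: types.InputFormatCsv,
		S3BucketSource: &types.S3BucketSource{
			S3Bucket:    aws.String("my-import-bucket"), // placeholder bucket
			S3KeyPrefix: aws.String("loads/2024/"),      // placeholder prefix
		},
		TableCreationParameters: &types.TableCreationParameters{
			TableName:   aws.String("ImportedTable"),
			BillingMode: types.BillingModePayPerRequest,
			AttributeDefinitions: []types.AttributeDefinition{
				{AttributeName: aws.String("PK"), AttributeType: types.ScalarAttributeTypeS},
			},
			KeySchema: []types.KeySchemaElement{
				{AttributeName: aws.String("PK"), KeyType: types.KeyTypeHash},
			},
		},
	})
}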
+func (c *Client) ImportTable(ctx context.Context, params *ImportTableInput, optFns ...func(*Options)) (*ImportTableOutput, error) { + if params == nil { + params = &ImportTableInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ImportTable", params, optFns, c.addOperationImportTableMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ImportTableOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ImportTableInput struct { + + // The format of the source data. Valid values for InputFormat are CSV , + // DYNAMODB_JSON or ION . + // + // This member is required. + InputFormat types.InputFormat + + // The S3 bucket that provides the source for the import. + // + // This member is required. + S3BucketSource *types.S3BucketSource + + // Parameters for the table to import the data into. + // + // This member is required. + TableCreationParameters *types.TableCreationParameters + + // Providing a ClientToken makes the call to ImportTable idempotent, meaning + // that multiple identical calls have the same effect as one single call. + // + // A client token is valid for 8 hours after the first request that uses it is + // completed. After 8 hours, any request with the same client token is treated as a + // new request. Do not resubmit the same request with the same client token for + // more than 8 hours, or the result might not be idempotent. + // + // If you submit a request with the same client token but a change in other + // parameters within the 8-hour idempotency window, DynamoDB returns an + // IdempotentParameterMismatch exception. + ClientToken *string + + // Type of compression to be used on the input coming from the imported table. + InputCompressionType types.InputCompressionType + + // Additional properties that specify how the input is formatted. + InputFormatOptions *types.InputFormatOptions + + noSmithyDocumentSerde +} + +func (in *ImportTableInput) bindEndpointParams(p *EndpointParameters) { + func() { + v1 := in.TableCreationParameters + var v2 *string + if v1 != nil { + v3 := v1.TableName + v2 = v3 + } + p.ResourceArn = v2 + }() + +} + +type ImportTableOutput struct { + + // Represents the properties of the table created for the import, and parameters + // of the import. The import parameters include import status, how many items were + // processed, and how many errors were encountered. + // + // This member is required. + ImportTableDescription *types.ImportTableDescription + + // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationImportTableMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpImportTable{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpImportTable{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ImportTable"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addIdempotencyToken_opImportTableMiddleware(stack, options); err != nil { + return err + } + if err = addOpImportTableValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opImportTable(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = 
addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +type idempotencyToken_initializeOpImportTable struct { + tokenProvider IdempotencyTokenProvider +} + +func (*idempotencyToken_initializeOpImportTable) ID() string { + return "OperationIdempotencyTokenAutoFill" +} + +func (m *idempotencyToken_initializeOpImportTable) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.tokenProvider == nil { + return next.HandleInitialize(ctx, in) + } + + input, ok := in.Parameters.(*ImportTableInput) + if !ok { + return out, metadata, fmt.Errorf("expected middleware input to be of type *ImportTableInput ") + } + + if input.ClientToken == nil { + t, err := m.tokenProvider.GetIdempotencyToken() + if err != nil { + return out, metadata, err + } + input.ClientToken = &t + } + return next.HandleInitialize(ctx, in) +} +func addIdempotencyToken_opImportTableMiddleware(stack *middleware.Stack, cfg Options) error { + return stack.Initialize.Add(&idempotencyToken_initializeOpImportTable{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before) +} + +func newServiceMetadataMiddleware_opImportTable(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ImportTable", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListBackups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListBackups.go new file mode 100644 index 0000000000..4d594fa5b0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListBackups.go @@ -0,0 +1,306 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// List DynamoDB backups that are associated with an Amazon Web Services account +// and weren't made with Amazon Web Services Backup. To list these backups for a +// given table, specify TableName . ListBackups returns a paginated list of +// results with at most 1 MB worth of items in a page. You can also specify a +// maximum number of entries to be returned in a page. +// +// In the request, start time is inclusive, but end time is exclusive. Note that +// these boundaries are for the time at which the original backup was requested. +// +// You can call ListBackups a maximum of five times per second. +// +// If you want to retrieve the complete list of backups made with Amazon Web +// Services Backup, use the [Amazon Web Services Backup list API.] 
+// +// [Amazon Web Services Backup list API.]: https://docs.aws.amazon.com/aws-backup/latest/devguide/API_ListBackupJobs.html +func (c *Client) ListBackups(ctx context.Context, params *ListBackupsInput, optFns ...func(*Options)) (*ListBackupsOutput, error) { + if params == nil { + params = &ListBackupsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListBackups", params, optFns, c.addOperationListBackupsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListBackupsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListBackupsInput struct { + + // The backups from the table specified by BackupType are listed. + // + // Where BackupType can be: + // + // - USER - On-demand backup created by you. (The default setting if no other + // backup types are specified.) + // + // - SYSTEM - On-demand backup automatically created by DynamoDB. + // + // - ALL - All types of on-demand backups (USER and SYSTEM). + BackupType types.BackupTypeFilter + + // LastEvaluatedBackupArn is the Amazon Resource Name (ARN) of the backup last + // evaluated when the current page of results was returned, inclusive of the + // current page of results. This value may be specified as the + // ExclusiveStartBackupArn of a new ListBackups operation in order to fetch the + // next page of results. + ExclusiveStartBackupArn *string + + // Maximum number of backups to return at once. + Limit *int32 + + // Lists the backups from the table specified in TableName . You can also provide + // the Amazon Resource Name (ARN) of the table in this parameter. + TableName *string + + // Only backups created after this time are listed. TimeRangeLowerBound is + // inclusive. + TimeRangeLowerBound *time.Time + + // Only backups created before this time are listed. TimeRangeUpperBound is + // exclusive. + TimeRangeUpperBound *time.Time + + noSmithyDocumentSerde +} + +func (in *ListBackupsInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.TableName + +} + +type ListBackupsOutput struct { + + // List of BackupSummary objects. + BackupSummaries []types.BackupSummary + + // The ARN of the backup last evaluated when the current page of results was + // returned, inclusive of the current page of results. This value may be specified + // as the ExclusiveStartBackupArn of a new ListBackups operation in order to fetch + // the next page of results. + // + // If LastEvaluatedBackupArn is empty, then the last page of results has been + // processed and there are no more results to be retrieved. + // + // If LastEvaluatedBackupArn is not empty, this may or may not indicate that there + // is more data to be returned. All results are guaranteed to have been returned if + // and only if no value for LastEvaluatedBackupArn is returned. + LastEvaluatedBackupArn *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListBackupsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpListBackups{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpListBackups{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListBackups"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpListBackupsDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBackups(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = 
addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpListBackupsDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpListBackupsDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpListBackupsDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*ListBackupsInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opListBackups(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListBackups", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListContributorInsights.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListContributorInsights.go new file mode 100644 index 0000000000..d4870973e5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListContributorInsights.go @@ -0,0 +1,302 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a list of ContributorInsightsSummary for a table and all its global +// secondary indexes. 
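(Editor's note, not part of the vendored patch.) A manual-paging sketch for the operation documented above; the table name is a placeholder, and the generated ListContributorInsightsPaginator further down wraps this same NextToken loop:

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
)

// listInsights walks every page by threading NextToken through repeated calls.
func listInsights(ctx context.Context, client *dynamodb.Client, table string) error {
	var token *string
	for {
		page, err := client.ListContributorInsights(ctx, &dynamodb.ListContributorInsightsInput{
			TableName:  aws.String(table),
			MaxResults: 50,
			NextToken:  token,
		})
		if err != nil {
			return err
		}
		for _, s := range page.ContributorInsightsSummaries {
			fmt.Println(aws.ToString(s.TableName), s.ContributorInsightsStatus)
		}
		if page.NextToken == nil {
			break
		}
		token = page.NextToken
	}
	return nil
}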
+func (c *Client) ListContributorInsights(ctx context.Context, params *ListContributorInsightsInput, optFns ...func(*Options)) (*ListContributorInsightsOutput, error) { + if params == nil { + params = &ListContributorInsightsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListContributorInsights", params, optFns, c.addOperationListContributorInsightsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListContributorInsightsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListContributorInsightsInput struct { + + // Maximum number of results to return per page. + MaxResults int32 + + // A token for the desired page, if there is one. + NextToken *string + + // The name of the table. You can also provide the Amazon Resource Name (ARN) of + // the table in this parameter. + TableName *string + + noSmithyDocumentSerde +} + +func (in *ListContributorInsightsInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.TableName + +} + +type ListContributorInsightsOutput struct { + + // A list of ContributorInsightsSummary. + ContributorInsightsSummaries []types.ContributorInsightsSummary + + // A token to go to the next page if there is one. + NextToken *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListContributorInsightsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpListContributorInsights{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpListContributorInsights{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListContributorInsights"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opListContributorInsights(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListContributorInsightsPaginatorOptions is the paginator options for +// ListContributorInsights +type ListContributorInsightsPaginatorOptions struct { + // Maximum number of results to return per page. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListContributorInsightsPaginator is a paginator for ListContributorInsights +type ListContributorInsightsPaginator struct { + options ListContributorInsightsPaginatorOptions + client ListContributorInsightsAPIClient + params *ListContributorInsightsInput + nextToken *string + firstPage bool +} + +// NewListContributorInsightsPaginator returns a new +// ListContributorInsightsPaginator +func NewListContributorInsightsPaginator(client ListContributorInsightsAPIClient, params *ListContributorInsightsInput, optFns ...func(*ListContributorInsightsPaginatorOptions)) *ListContributorInsightsPaginator { + if params == nil { + params = &ListContributorInsightsInput{} + } + + options := ListContributorInsightsPaginatorOptions{} + if params.MaxResults != 0 { + options.Limit = params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListContributorInsightsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListContributorInsightsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListContributorInsights page. +func (p *ListContributorInsightsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListContributorInsightsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + params.MaxResults = p.options.Limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListContributorInsights(ctx, &params, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListContributorInsightsAPIClient is a client that implements the +// ListContributorInsights operation. +type ListContributorInsightsAPIClient interface { + ListContributorInsights(context.Context, *ListContributorInsightsInput, ...func(*Options)) (*ListContributorInsightsOutput, error) +} + +var _ ListContributorInsightsAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListContributorInsights(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListContributorInsights", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListExports.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListExports.go new file mode 100644 index 0000000000..33c8aef465 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListExports.go @@ -0,0 +1,304 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists completed exports within the past 90 days.
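(Editor's note, not part of the vendored patch.) A sketch using the generated paginator defined later in this file; the table ARN is a placeholder:

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
)

// printExports lists export summaries 25 at a time until the service stops
// returning a pagination token.
func printExports(ctx context.Context, client *dynamodb.Client) error {
	p := dynamodb.NewListExportsPaginator(client, &dynamodb.ListExportsInput{
		TableArn: aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/Movies"), // placeholder ARN
	}, func(o *dynamodb.ListExportsPaginatorOptions) {
		o.Limit = 25 // copied into MaxResults by NextPage
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, e := range page.ExportSummaries {
			fmt.Println(aws.ToString(e.ExportArn), e.ExportStatus)
		}
	}
	return nil
}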
+func (c *Client) ListExports(ctx context.Context, params *ListExportsInput, optFns ...func(*Options)) (*ListExportsOutput, error) { + if params == nil { + params = &ListExportsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListExports", params, optFns, c.addOperationListExportsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListExportsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListExportsInput struct { + + // Maximum number of results to return per page. + MaxResults *int32 + + // An optional string that, if supplied, must be copied from the output of a + // previous call to ListExports . When provided in this manner, the API fetches the + // next page of results. + NextToken *string + + // The Amazon Resource Name (ARN) associated with the exported table. + TableArn *string + + noSmithyDocumentSerde +} + +func (in *ListExportsInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.TableArn + +} + +type ListExportsOutput struct { + + // A list of ExportSummary objects. + ExportSummaries []types.ExportSummary + + // If this value is returned, there are additional results to be displayed. To + // retrieve them, call ListExports again, with NextToken set to this value. + NextToken *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListExportsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpListExports{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpListExports{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListExports"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opListExports(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListExportsPaginatorOptions is the paginator options for ListExports +type ListExportsPaginatorOptions struct { + // Maximum number of results to return per page. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListExportsPaginator is a paginator for ListExports +type ListExportsPaginator struct { + options ListExportsPaginatorOptions + client ListExportsAPIClient + params *ListExportsInput + nextToken *string + firstPage bool +} + +// NewListExportsPaginator returns a new ListExportsPaginator +func NewListExportsPaginator(client ListExportsAPIClient, params *ListExportsInput, optFns ...func(*ListExportsPaginatorOptions)) *ListExportsPaginator { + if params == nil { + params = &ListExportsInput{} + } + + options := ListExportsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListExportsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListExportsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListExports page. 
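+//
+// A minimal drain loop (illustrative, not generated code): client and ctx are
+// assumed to exist, and errors are simply propagated.
+//
+//	p := dynamodb.NewListExportsPaginator(client, &dynamodb.ListExportsInput{})
+//	for p.HasMorePages() {
+//		page, err := p.NextPage(ctx)
+//		if err != nil {
+//			return err
+//		}
+//		for _, s := range page.ExportSummaries {
+//			fmt.Println(aws.ToString(s.ExportArn))
+//		}
+//	}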
+func (p *ListExportsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListExportsOutput, error) {
+	if !p.HasMorePages() {
+		return nil, fmt.Errorf("no more pages available")
+	}
+
+	params := *p.params
+	params.NextToken = p.nextToken
+
+	var limit *int32
+	if p.options.Limit > 0 {
+		limit = &p.options.Limit
+	}
+	params.MaxResults = limit
+
+	optFns = append([]func(*Options){
+		addIsPaginatorUserAgent,
+	}, optFns...)
+	result, err := p.client.ListExports(ctx, &params, optFns...)
+	if err != nil {
+		return nil, err
+	}
+	p.firstPage = false
+
+	prevToken := p.nextToken
+	p.nextToken = result.NextToken
+
+	if p.options.StopOnDuplicateToken &&
+		prevToken != nil &&
+		p.nextToken != nil &&
+		*prevToken == *p.nextToken {
+		p.nextToken = nil
+	}
+
+	return result, nil
+}
+
+// ListExportsAPIClient is a client that implements the ListExports operation.
+type ListExportsAPIClient interface {
+	ListExports(context.Context, *ListExportsInput, ...func(*Options)) (*ListExportsOutput, error)
+}
+
+var _ ListExportsAPIClient = (*Client)(nil)
+
+func newServiceMetadataMiddleware_opListExports(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		OperationName: "ListExports",
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListGlobalTables.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListGlobalTables.go
new file mode 100644
index 0000000000..dcf73cdadb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListGlobalTables.go
@@ -0,0 +1,268 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+	internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Lists all global tables that have a replica in the specified Region.
+//
+// This documentation is for version 2017.11.29 (Legacy) of global tables, which
+// should be avoided for new global tables. Customers should use [Global Tables version 2019.11.21 (Current)] when possible,
+// because it provides greater flexibility, higher efficiency, and consumes less
+// write capacity than 2017.11.29 (Legacy).
+//
+// To determine which version you're using, see [Determining the global table version you are using]. To update existing global tables
+// from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see [Upgrading global tables].
+//
+// [Global Tables version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html
+// [Upgrading global tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html
+// [Determining the global table version you are using]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html
+func (c *Client) ListGlobalTables(ctx context.Context, params *ListGlobalTablesInput, optFns ...func(*Options)) (*ListGlobalTablesOutput, error) {
+	if params == nil {
+		params = &ListGlobalTablesInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "ListGlobalTables", params, optFns, c.addOperationListGlobalTablesMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*ListGlobalTablesOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type ListGlobalTablesInput struct {
+
+	// The first global table name that this operation will evaluate.
+	ExclusiveStartGlobalTableName *string
+
+	// The maximum number of table names to return. If this parameter is not
+	// specified, DynamoDB defaults to 100.
+	//
+	// If the number of global tables DynamoDB finds reaches this limit, it stops the
+	// operation and returns the table names collected up to that point, with a table
+	// name in LastEvaluatedGlobalTableName that you can supply to the
+	// ExclusiveStartGlobalTableName parameter in a subsequent operation.
+	Limit *int32
+
+	// Lists the global tables in a specific Region.
+	RegionName *string
+
+	noSmithyDocumentSerde
+}
+
+type ListGlobalTablesOutput struct {
+
+	// List of global table names.
+	GlobalTables []types.GlobalTable
+
+	// Last evaluated global table name.
+	LastEvaluatedGlobalTableName *string
+
+	// Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListGlobalTablesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpListGlobalTables{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpListGlobalTables{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListGlobalTables"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpListGlobalTablesDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListGlobalTables(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + 
if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpListGlobalTablesDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpListGlobalTablesDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpListGlobalTablesDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*ListGlobalTablesInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opListGlobalTables(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListGlobalTables", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListImports.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListImports.go new file mode 100644 index 0000000000..5a02bf9c9c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListImports.go @@ -0,0 +1,304 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists completed imports within the past 90 days. 
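+//
+// Illustrative usage (not part of the generated file): tableArn is a
+// hypothetical string variable holding the ARN of the target table.
+//
+//	out, err := client.ListImports(ctx, &dynamodb.ListImportsInput{
+//		TableArn: aws.String(tableArn),
+//		PageSize: aws.Int32(10),
+//	})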
+func (c *Client) ListImports(ctx context.Context, params *ListImportsInput, optFns ...func(*Options)) (*ListImportsOutput, error) { + if params == nil { + params = &ListImportsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListImports", params, optFns, c.addOperationListImportsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListImportsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListImportsInput struct { + + // An optional string that, if supplied, must be copied from the output of a + // previous call to ListImports . When provided in this manner, the API fetches the + // next page of results. + NextToken *string + + // The number of ImportSummary objects returned in a single page. + PageSize *int32 + + // The Amazon Resource Name (ARN) associated with the table that was imported to. + TableArn *string + + noSmithyDocumentSerde +} + +func (in *ListImportsInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.TableArn + +} + +type ListImportsOutput struct { + + // A list of ImportSummary objects. + ImportSummaryList []types.ImportSummary + + // If this value is returned, there are additional results to be displayed. To + // retrieve them, call ListImports again, with NextToken set to this value. + NextToken *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListImportsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpListImports{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpListImports{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListImports"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opListImports(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListImportsPaginatorOptions is the paginator options for ListImports +type ListImportsPaginatorOptions struct { + // The number of ImportSummary objects returned in a single page. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListImportsPaginator is a paginator for ListImports +type ListImportsPaginator struct { + options ListImportsPaginatorOptions + client ListImportsAPIClient + params *ListImportsInput + nextToken *string + firstPage bool +} + +// NewListImportsPaginator returns a new ListImportsPaginator +func NewListImportsPaginator(client ListImportsAPIClient, params *ListImportsInput, optFns ...func(*ListImportsPaginatorOptions)) *ListImportsPaginator { + if params == nil { + params = &ListImportsInput{} + } + + options := ListImportsPaginatorOptions{} + if params.PageSize != nil { + options.Limit = *params.PageSize + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListImportsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListImportsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListImports page. 
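+//
+// Sketch of constructing the paginator with the duplicate-token guard enabled
+// (illustrative; client is assumed). If the service ever returns the same
+// NextToken twice in a row, pagination stops instead of looping:
+//
+//	p := dynamodb.NewListImportsPaginator(client, &dynamodb.ListImportsInput{},
+//		func(o *dynamodb.ListImportsPaginatorOptions) {
+//			o.StopOnDuplicateToken = true
+//		})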
+func (p *ListImportsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListImportsOutput, error) {
+	if !p.HasMorePages() {
+		return nil, fmt.Errorf("no more pages available")
+	}
+
+	params := *p.params
+	params.NextToken = p.nextToken
+
+	var limit *int32
+	if p.options.Limit > 0 {
+		limit = &p.options.Limit
+	}
+	params.PageSize = limit
+
+	optFns = append([]func(*Options){
+		addIsPaginatorUserAgent,
+	}, optFns...)
+	result, err := p.client.ListImports(ctx, &params, optFns...)
+	if err != nil {
+		return nil, err
+	}
+	p.firstPage = false
+
+	prevToken := p.nextToken
+	p.nextToken = result.NextToken
+
+	if p.options.StopOnDuplicateToken &&
+		prevToken != nil &&
+		p.nextToken != nil &&
+		*prevToken == *p.nextToken {
+		p.nextToken = nil
+	}
+
+	return result, nil
+}
+
+// ListImportsAPIClient is a client that implements the ListImports operation.
+type ListImportsAPIClient interface {
+	ListImports(context.Context, *ListImportsInput, ...func(*Options)) (*ListImportsOutput, error)
+}
+
+var _ ListImportsAPIClient = (*Client)(nil)
+
+func newServiceMetadataMiddleware_opListImports(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		OperationName: "ListImports",
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListTables.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListTables.go
new file mode 100644
index 0000000000..ec575e9679
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListTables.go
@@ -0,0 +1,356 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns an array of table names associated with the current account and
+// endpoint. The output from ListTables is paginated, with each page returning a
+// maximum of 100 table names.
+func (c *Client) ListTables(ctx context.Context, params *ListTablesInput, optFns ...func(*Options)) (*ListTablesOutput, error) {
+	if params == nil {
+		params = &ListTablesInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "ListTables", params, optFns, c.addOperationListTablesMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*ListTablesOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+// Represents the input of a ListTables operation.
+type ListTablesInput struct {
+
+	// The first table name that this operation will evaluate. Use the value that was
+	// returned for LastEvaluatedTableName in a previous operation, so that you can
+	// obtain the next page of results.
+	ExclusiveStartTableName *string
+
+	// A maximum number of table names to return. If this parameter is not specified,
+	// the limit is 100.
+	Limit *int32
+
+	noSmithyDocumentSerde
+}
+
+// Represents the output of a ListTables operation.
+type ListTablesOutput struct {
+
+	// The name of the last table in the current page of results. Use this value as
+	// the ExclusiveStartTableName in a new request to obtain the next page of
+	// results, until all the table names are returned.
+	//
+	// If you do not receive a LastEvaluatedTableName value in the response, this
+	// means that there are no more table names to be retrieved.
+ LastEvaluatedTableName *string + + // The names of the tables associated with the current account at the current + // endpoint. The maximum size of this array is 100. + // + // If LastEvaluatedTableName also appears in the output, you can use this value as + // the ExclusiveStartTableName parameter in a subsequent ListTables request and + // obtain the next page of results. + TableNames []string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListTablesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpListTables{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpListTables{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListTables"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpListTablesDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTables(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err 
= addInterceptExecution(stack, options); err != nil {
+		return err
+	}
+	if err = addInterceptBeforeSerialization(stack, options); err != nil {
+		return err
+	}
+	if err = addInterceptAfterSerialization(stack, options); err != nil {
+		return err
+	}
+	if err = addInterceptBeforeSigning(stack, options); err != nil {
+		return err
+	}
+	if err = addInterceptAfterSigning(stack, options); err != nil {
+		return err
+	}
+	if err = addInterceptTransmit(stack, options); err != nil {
+		return err
+	}
+	if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+		return err
+	}
+	if err = addInterceptAfterDeserialization(stack, options); err != nil {
+		return err
+	}
+	if err = addSpanInitializeStart(stack); err != nil {
+		return err
+	}
+	if err = addSpanInitializeEnd(stack); err != nil {
+		return err
+	}
+	if err = addSpanBuildRequestStart(stack); err != nil {
+		return err
+	}
+	if err = addSpanBuildRequestEnd(stack); err != nil {
+		return err
+	}
+	return nil
+}
+
+// ListTablesPaginatorOptions is the paginator options for ListTables
+type ListTablesPaginatorOptions struct {
+	// A maximum number of table names to return. If this parameter is not specified,
+	// the limit is 100.
+	Limit int32
+
+	// Set to true if pagination should stop if the service returns a pagination token
+	// that matches the most recent token provided to the service.
+	StopOnDuplicateToken bool
+}
+
+// ListTablesPaginator is a paginator for ListTables
+type ListTablesPaginator struct {
+	options   ListTablesPaginatorOptions
+	client    ListTablesAPIClient
+	params    *ListTablesInput
+	nextToken *string
+	firstPage bool
+}
+
+// NewListTablesPaginator returns a new ListTablesPaginator
+func NewListTablesPaginator(client ListTablesAPIClient, params *ListTablesInput, optFns ...func(*ListTablesPaginatorOptions)) *ListTablesPaginator {
+	if params == nil {
+		params = &ListTablesInput{}
+	}
+
+	options := ListTablesPaginatorOptions{}
+	if params.Limit != nil {
+		options.Limit = *params.Limit
+	}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &ListTablesPaginator{
+		options:   options,
+		client:    client,
+		params:    params,
+		firstPage: true,
+		nextToken: params.ExclusiveStartTableName,
+	}
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListTablesPaginator) HasMorePages() bool {
+	return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next ListTables page.
+func (p *ListTablesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListTablesOutput, error) {
+	if !p.HasMorePages() {
+		return nil, fmt.Errorf("no more pages available")
+	}
+
+	params := *p.params
+	params.ExclusiveStartTableName = p.nextToken
+
+	var limit *int32
+	if p.options.Limit > 0 {
+		limit = &p.options.Limit
+	}
+	params.Limit = limit
+
+	optFns = append([]func(*Options){
+		addIsPaginatorUserAgent,
+	}, optFns...)
+	result, err := p.client.ListTables(ctx, &params, optFns...)
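+	// Unlike the NextToken-based operations in this package, ListTables pages
+	// with LastEvaluatedTableName; the value captured below is carried into
+	// ExclusiveStartTableName on the following NextPage call.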
+ if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.LastEvaluatedTableName + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func addOpListTablesDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpListTablesDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpListTablesDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*ListTablesInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +// ListTablesAPIClient is a client that implements the ListTables operation. +type ListTablesAPIClient interface { + ListTables(context.Context, *ListTablesInput, ...func(*Options)) (*ListTablesOutput, error) +} + +var _ ListTablesAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListTables(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListTables", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListTagsOfResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListTagsOfResource.go new file mode 100644 index 0000000000..6d9ed73927 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListTagsOfResource.go @@ -0,0 +1,268 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// List all tags on an Amazon DynamoDB resource. You can call ListTagsOfResource +// up to 10 times per second, per account. +// +// For an overview on tagging DynamoDB resources, see [Tagging for DynamoDB] in the Amazon DynamoDB +// Developer Guide. 
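+//
+// Illustrative call (not part of the generated file; the ARN value is a
+// placeholder):
+//
+//	out, err := client.ListTagsOfResource(ctx, &dynamodb.ListTagsOfResourceInput{
+//		ResourceArn: aws.String("arn:aws:dynamodb:..."),
+//	})
+//	if err == nil {
+//		for _, t := range out.Tags {
+//			fmt.Printf("%s=%s\n", aws.ToString(t.Key), aws.ToString(t.Value))
+//		}
+//	}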
+//
+// [Tagging for DynamoDB]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html
+func (c *Client) ListTagsOfResource(ctx context.Context, params *ListTagsOfResourceInput, optFns ...func(*Options)) (*ListTagsOfResourceOutput, error) {
+	if params == nil {
+		params = &ListTagsOfResourceInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "ListTagsOfResource", params, optFns, c.addOperationListTagsOfResourceMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*ListTagsOfResourceOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type ListTagsOfResourceInput struct {
+
+	// The Amazon DynamoDB resource with tags to be listed. This value is an Amazon
+	// Resource Name (ARN).
+	//
+	// This member is required.
+	ResourceArn *string
+
+	// An optional string that, if supplied, must be copied from the output of a
+	// previous call to ListTagsOfResource. When provided in this manner, this API
+	// fetches the next page of results.
+	NextToken *string
+
+	noSmithyDocumentSerde
+}
+
+func (in *ListTagsOfResourceInput) bindEndpointParams(p *EndpointParameters) {
+
+	p.ResourceArn = in.ResourceArn
+
+}
+
+type ListTagsOfResourceOutput struct {
+
+	// If this value is returned, there are additional results to be displayed. To
+	// retrieve them, call ListTagsOfResource again, with NextToken set to this value.
+	NextToken *string
+
+	// The tags currently associated with the Amazon DynamoDB resource.
+	Tags []types.Tag
+
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListTagsOfResourceMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
+	err = stack.Serialize.Add(&awsAwsjson10_serializeOpListTagsOfResource{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpListTagsOfResource{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "ListTagsOfResource"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addClientRequestID(stack); err != nil {
+		return err
+	}
+	if err = addComputeContentLength(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addComputePayloadSHA256(stack); err != nil {
+		return err
+	}
+	if err = addRetry(stack, options); err != nil {
+		return err
+	}
+	if err = addRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = addRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addSpanRetryLoop(stack, options); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack, options); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addOpListTagsOfResourceDiscoverEndpointMiddleware(stack, options, c); err != nil {
+		return err
+	}
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addTimeOffsetBuild(stack, c); err != nil {
return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpListTagsOfResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTagsOfResource(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpListTagsOfResourceDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpListTagsOfResourceDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpListTagsOfResourceDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*ListTagsOfResourceInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := 
&DescribeEndpointsInput{}
+
+	opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+	for _, fn := range optFns {
+		fn(&opt)
+	}
+
+	go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+	return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opListTagsOfResource(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		OperationName: "ListTagsOfResource",
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutItem.go
new file mode 100644
index 0000000000..43bd113d2c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutItem.go
@@ -0,0 +1,483 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+	internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Creates a new item, or replaces an old item with a new item. If an item that
+// has the same primary key as the new item already exists in the specified table,
+// the new item completely replaces the existing item. You can perform a
+// conditional put operation (add a new item if one with the specified primary key
+// doesn't exist), or replace an existing item if it has certain attribute values.
+// You can return the item's attribute values in the same operation, using the
+// ReturnValues parameter.
+//
+// When you add an item, the primary key attributes are the only required
+// attributes.
+//
+// Empty String and Binary attribute values are allowed. Attribute values of type
+// String and Binary must have a length greater than zero if the attribute is used
+// as a key attribute for a table or index. Set type attributes cannot be empty.
+//
+// Invalid requests with empty values will be rejected with a ValidationException .
+//
+// To prevent a new item from replacing an existing item, use a conditional
+// expression that contains the attribute_not_exists function with the name of the
+// attribute being used as the partition key for the table. Since every record must
+// contain that attribute, the attribute_not_exists function will only succeed if
+// no matching item exists.
+//
+// For more information about PutItem , see [Working with Items] in the Amazon DynamoDB Developer
+// Guide.
+//
+// [Working with Items]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html
+func (c *Client) PutItem(ctx context.Context, params *PutItemInput, optFns ...func(*Options)) (*PutItemOutput, error) {
+	if params == nil {
+		params = &PutItemInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "PutItem", params, optFns, c.addOperationPutItemMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*PutItemOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+// Represents the input of a PutItem operation.
+type PutItemInput struct {
+
+	// A map of attribute name/value pairs, one for each attribute. Only the primary
+	// key attributes are required; you can optionally provide other attribute
+	// name-value pairs for the item.
+	//
+	// You must provide all of the attributes for the primary key. For example, with a
+	// simple primary key, you only need to provide a value for the partition key. For
+	// a composite primary key, you must provide values for both the partition key
+	// and the sort key.
+	//
+	// If you specify any attributes that are part of an index key, then the data
+	// types for those attributes must match those of the schema in the table's
+	// attribute definition.
+	//
+	// Empty String and Binary attribute values are allowed. Attribute values of type
+	// String and Binary must have a length greater than zero if the attribute is used
+	// as a key attribute for a table or index.
+	//
+	// For more information about primary keys, see [Primary Key] in the Amazon DynamoDB Developer
+	// Guide.
+	//
+	// Each element in the Item map is an AttributeValue object.
+	//
+	// [Primary Key]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.CoreComponents.html#HowItWorks.CoreComponents.PrimaryKey
+	//
+	// This member is required.
+	Item map[string]types.AttributeValue
+
+	// The name of the table to contain the item. You can also provide the Amazon
+	// Resource Name (ARN) of the table in this parameter.
+	//
+	// This member is required.
+	TableName *string
+
+	// A condition that must be satisfied in order for a conditional PutItem operation
+	// to succeed.
+	//
+	// An expression can contain any of the following:
+	//
+	//   - Functions: attribute_exists | attribute_not_exists | attribute_type |
+	//   contains | begins_with | size
+	//
+	// These function names are case-sensitive.
+	//
+	//   - Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
+	//
+	//   - Logical operators: AND | OR | NOT
+	//
+	// For more information on condition expressions, see [Condition Expressions] in the Amazon DynamoDB
+	// Developer Guide.
+	//
+	// [Condition Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html
+	ConditionExpression *string
+
+	// This is a legacy parameter. Use ConditionExpression instead. For more
+	// information, see [ConditionalOperator] in the Amazon DynamoDB Developer Guide.
+	//
+	// [ConditionalOperator]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html
+	ConditionalOperator types.ConditionalOperator
+
+	// This is a legacy parameter. Use ConditionExpression instead. For more
+	// information, see [Expected] in the Amazon DynamoDB Developer Guide.
+	//
+	// [Expected]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html
+	Expected map[string]types.ExpectedAttributeValue
+
+	// One or more substitution tokens for attribute names in an expression. The
+	// following are some use cases for using ExpressionAttributeNames :
+	//
+	//   - To access an attribute whose name conflicts with a DynamoDB reserved word.
+	//
+	//   - To create a placeholder for repeating occurrences of an attribute name in
+	//   an expression.
+	//
+	//   - To prevent special characters in an attribute name from being
+	//   misinterpreted in an expression.
+	//
+	// Use the # character in an expression to dereference an attribute name. For
+	// example, consider the following attribute name:
+	//
+	//   - Percentile
+	//
+	// The name of this attribute conflicts with a reserved word, so it cannot be used
+	// directly in an expression. (For the complete list of reserved words, see [Reserved Words] in the
+	// Amazon DynamoDB Developer Guide). To work around this, you could specify the
+	// following for ExpressionAttributeNames :
+	//
+	//   - {"#P":"Percentile"}
+	//
+	// You could then use this substitution in an expression, as in this example:
+	//
+	//   - #P = :val
+	//
+	// Tokens that begin with the : character are expression attribute values, which
+	// are placeholders for the actual value at runtime.
+	//
+	// For more information on expression attribute names, see [Specifying Item Attributes] in the Amazon DynamoDB
+	// Developer Guide.
+	//
+	// [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html
+	// [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html
+	ExpressionAttributeNames map[string]string
+
+	// One or more values that can be substituted in an expression.
+	//
+	// Use the : (colon) character in an expression to dereference an attribute value.
+	// For example, suppose that you wanted to check whether the value of the
+	// ProductStatus attribute was one of the following:
+	//
+	// Available | Backordered | Discontinued
+	//
+	// You would first need to specify ExpressionAttributeValues as follows:
+	//
+	// { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
+	// ":disc":{"S":"Discontinued"} }
+	//
+	// You could then use these values in an expression, such as this:
+	//
+	// ProductStatus IN (:avail, :back, :disc)
+	//
+	// For more information on expression attribute values, see [Condition Expressions] in the Amazon
+	// DynamoDB Developer Guide.
+	//
+	// [Condition Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html
+	ExpressionAttributeValues map[string]types.AttributeValue
+
+	// Determines the level of detail about either provisioned or on-demand throughput
+	// consumption that is returned in the response:
+	//
+	//   - INDEXES - The response includes the aggregate ConsumedCapacity for the
+	//   operation, together with ConsumedCapacity for each table and secondary index
+	//   that was accessed.
+	//
+	// Note that some operations, such as GetItem and BatchGetItem , do not access any
+	// indexes at all. In these cases, specifying INDEXES will only return
+	// ConsumedCapacity information for table(s).
+	//
+	//   - TOTAL - The response includes only the aggregate ConsumedCapacity for the
+	//   operation.
+	//
+	//   - NONE - No ConsumedCapacity details are included in the response.
+	ReturnConsumedCapacity types.ReturnConsumedCapacity
+
+	// Determines whether item collection metrics are returned. If set to SIZE , the
+	// response includes statistics about item collections, if any, that were modified
+	// during the operation. If set to NONE (the default), no statistics are returned.
+	ReturnItemCollectionMetrics types.ReturnItemCollectionMetrics
+
+	// Use ReturnValues if you want to get the item attributes as they appeared before
+	// they were updated with the PutItem request. For PutItem , the valid values are:
+	//
+	//   - NONE - If ReturnValues is not specified, or if its value is NONE , then
+	//   nothing is returned. (This setting is the default for ReturnValues .)
+	//
+	//   - ALL_OLD - If PutItem overwrote an attribute name-value pair, then the
+	//   content of the old item is returned.
+	//
+	// The values returned are strongly consistent.
+	//
+	// There is no additional cost associated with requesting a return value aside
+	// from the small network and processing overhead of receiving a larger response.
+	// No read capacity units are consumed.
+	//
+	// The ReturnValues parameter is used by several DynamoDB operations; however,
+	// PutItem does not recognize any values other than NONE or ALL_OLD .
+	ReturnValues types.ReturnValue
+
+	// An optional parameter that returns the item attributes for a PutItem operation
+	// that failed a condition check.
+	//
+	// There is no additional cost associated with requesting a return value aside
+	// from the small network and processing overhead of receiving a larger response.
+	// No read capacity units are consumed.
+	ReturnValuesOnConditionCheckFailure types.ReturnValuesOnConditionCheckFailure
+
+	noSmithyDocumentSerde
+}
+
+func (in *PutItemInput) bindEndpointParams(p *EndpointParameters) {
+
+	p.ResourceArn = in.TableName
+
+}
+
+// Represents the output of a PutItem operation.
+type PutItemOutput struct {
+
+	// The attribute values as they appeared before the PutItem operation, but only if
+	// ReturnValues is specified as ALL_OLD in the request. Each element consists of
+	// an attribute name and an attribute value.
+	Attributes map[string]types.AttributeValue
+
+	// The capacity units consumed by the PutItem operation. The data returned
+	// includes the total provisioned throughput consumed, along with statistics for
+	// the table and any indexes involved in the operation. ConsumedCapacity is only
+	// returned if the ReturnConsumedCapacity parameter was specified. For more
+	// information, see [Capacity unit consumption for write operations] in the Amazon DynamoDB Developer Guide.
+	//
+	// [Capacity unit consumption for write operations]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#write-operation-consumption
+	ConsumedCapacity *types.ConsumedCapacity
+
+	// Information about item collections, if any, that were affected by the PutItem
+	// operation. ItemCollectionMetrics is only returned if the
+	// ReturnItemCollectionMetrics parameter was specified. If the table does not have
+	// any local secondary indexes, this information is not returned in the response.
+	//
+	// Each ItemCollectionMetrics element consists of:
+	//
+	//   - ItemCollectionKey - The partition key value of the item collection. This is
+	//   the same as the partition key value of the item itself.
+	//
+	//   - SizeEstimateRangeGB - An estimate of item collection size, in gigabytes.
+	//   This value is a two-element array containing a lower bound and an upper bound
+	//   for the estimate. The estimate includes the size of all the items in the table,
+	//   plus the size of all attributes projected into all of the local secondary
+	//   indexes on that table. Use this estimate to measure whether a local secondary
+	//   index is approaching its size limit.
+	//
+	// The estimate is subject to change over time; therefore, do not rely on the
+	// precision or accuracy of the estimate.
+	ItemCollectionMetrics *types.ItemCollectionMetrics
+
+	// Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutItemMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpPutItem{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpPutItem{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutItem"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpPutItemDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpPutItemValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutItem(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err 
!= nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpPutItemDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpPutItemDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpPutItemDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*PutItemInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opPutItem(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutItem", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutResourcePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutResourcePolicy.go new file mode 100644 index 0000000000..d14c3e1f56 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutResourcePolicy.go @@ -0,0 +1,310 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Attaches a resource-based policy document to the resource, which can be a table +// or stream. When you attach a resource-based policy using this API, the policy +// application is [eventually consistent]. +// +// PutResourcePolicy is an idempotent operation; running it multiple times on the +// same resource using the same policy document will return the same revision ID. 
+// If you specify an ExpectedRevisionId that doesn't match the current policy's +// RevisionId , the PolicyNotFoundException will be returned. +// +// PutResourcePolicy is an asynchronous operation. If you issue a GetResourcePolicy +// request immediately after a PutResourcePolicy request, DynamoDB might return +// your previous policy, if there was one, or return the PolicyNotFoundException . +// This is because GetResourcePolicy uses an eventually consistent query, and the +// metadata for your policy or table might not be available at that moment. Wait +// for a few seconds, and then try the GetResourcePolicy request again. +// +// [eventually consistent]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html +func (c *Client) PutResourcePolicy(ctx context.Context, params *PutResourcePolicyInput, optFns ...func(*Options)) (*PutResourcePolicyOutput, error) { + if params == nil { + params = &PutResourcePolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutResourcePolicy", params, optFns, c.addOperationPutResourcePolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutResourcePolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutResourcePolicyInput struct { + + // An Amazon Web Services resource-based policy document in JSON format. + // + // - The maximum size supported for a resource-based policy document is 20 KB. + // DynamoDB counts whitespaces when calculating the size of a policy against this + // limit. + // + // - Within a resource-based policy, if the action for a DynamoDB service-linked + // role (SLR) to replicate data for a global table is denied, adding or deleting a + // replica will fail with an error. + // + // For a full list of all considerations that apply while attaching a + // resource-based policy, see [Resource-based policy considerations]. + // + // [Resource-based policy considerations]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-considerations.html + // + // This member is required. + Policy *string + + // The Amazon Resource Name (ARN) of the DynamoDB resource to which the policy + // will be attached. The resources you can specify include tables and streams. + // + // You can control index permissions using the base table's policy. To specify the + // same permission level for your table and its indexes, you can provide both the + // table and index Amazon Resource Name (ARN)s in the Resource field of a given + // Statement in your policy document. Alternatively, to specify different + // permissions for your table, indexes, or both, you can define multiple Statement + // fields in your policy document. + // + // This member is required. + ResourceArn *string + + // Set this parameter to true to confirm that you want to remove your permissions + // to change the policy of this resource in the future. + ConfirmRemoveSelfResourceAccess bool + + // A string value that you can use to conditionally update your policy. You can + // provide the revision ID of your existing policy to make mutating requests + // against that policy. + // + // When you provide an expected revision ID, if the revision ID of the existing + // policy on the resource doesn't match or if there's no policy attached to the + // resource, your request will be rejected with a PolicyNotFoundException . + // + // To conditionally attach a policy when no policy exists for the resource, + // specify NO_POLICY for the revision ID. 
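As a usage illustration for the ExpectedRevisionId contract described above (this aside is not part of the vendored file), a minimal caller-side sketch might look like the following; the table ARN and policy document are illustrative placeholders, and cfg comes from the standard config loader.

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		panic(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	out, err := client.PutResourcePolicy(ctx, &dynamodb.PutResourcePolicyInput{
		ResourceArn: aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/Music"), // illustrative ARN
		Policy:      aws.String(`{"Version":"2012-10-17","Statement":[]}`),             // illustrative document
		// "NO_POLICY" attaches the policy only when none exists yet; pass the
		// current policy's revision ID instead to make the update conditional.
		ExpectedRevisionId: aws.String("NO_POLICY"),
	})
	if err != nil {
		panic(err) // a stale or missing revision surfaces as PolicyNotFoundException
	}
	fmt.Println("new revision:", aws.ToString(out.RevisionId))
}
```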
+ ExpectedRevisionId *string + + noSmithyDocumentSerde +} + +func (in *PutResourcePolicyInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.ResourceArn + +} + +type PutResourcePolicyOutput struct { + + // A unique string that represents the revision ID of the policy. If you're + // comparing revision IDs, make sure to always use string comparison logic. + RevisionId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutResourcePolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpPutResourcePolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpPutResourcePolicy{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutResourcePolicy"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpPutResourcePolicyDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpPutResourcePolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutResourcePolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = 
addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpPutResourcePolicyDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpPutResourcePolicyDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpPutResourcePolicyDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*PutResourcePolicyInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opPutResourcePolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutResourcePolicy", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Query.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Query.go new file mode 100644 index 0000000000..d0e6f18362 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Query.go @@ -0,0 +1,745 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// You must provide the name of the partition key attribute and a single value for +// that attribute. Query returns all items with that partition key value. +// Optionally, you can provide a sort key attribute and use a comparison operator +// to refine the search results. +// +// Use the KeyConditionExpression parameter to provide a specific value for the +// partition key. The Query operation will return all of the items from the table +// or index with that partition key value. You can optionally narrow the scope of +// the Query operation by specifying a sort key value and a comparison operator in +// KeyConditionExpression . To further refine the Query results, you can +// optionally provide a FilterExpression . A FilterExpression determines which +// items within the results should be returned to you. All of the other results are +// discarded. +// +// A Query operation always returns a result set. If no matching items are found, +// the result set will be empty. Queries that do not return results consume the +// minimum number of read capacity units for that type of read operation. +// +// DynamoDB calculates the number of read capacity units consumed based on item +// size, not on the amount of data that is returned to an application. The number +// of capacity units consumed will be the same whether you request all of the +// attributes (the default behavior) or just some of them (using a projection +// expression). The number will also be the same whether or not you use a +// FilterExpression . +// +// Query results are always sorted by the sort key value. If the data type of the +// sort key is Number, the results are returned in numeric order; otherwise, the +// results are returned in order of UTF-8 bytes. By default, the sort order is +// ascending. To reverse the order, set the ScanIndexForward parameter to false. +// +// A single Query operation will read up to the maximum number of items set (if +// using the Limit parameter) or a maximum of 1 MB of data and then apply any +// filtering to the results using FilterExpression . If LastEvaluatedKey is +// present in the response, you will need to paginate the result set. For more +// information, see [Paginating the Results]in the Amazon DynamoDB Developer Guide. +// +// FilterExpression is applied after a Query finishes, but before the results are +// returned. A FilterExpression cannot contain partition key or sort key +// attributes. You need to specify those attributes in the KeyConditionExpression . +// +// A Query operation can return an empty result set and a LastEvaluatedKey if all +// the items read for the page of results are filtered out. +// +// You can query a table, a local secondary index, or a global secondary index. +// For a query on a table or on a local secondary index, you can set the +// ConsistentRead parameter to true and obtain a strongly consistent result. +// Global secondary indexes support eventually consistent reads only, so do not +// specify ConsistentRead when querying a global secondary index. 
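To make the query semantics above concrete (again, an editorial aside, not part of the generated file), here is a minimal sketch against an assumed Music table with Artist as the partition key and SongTitle as the sort key; all names are illustrative.

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		panic(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	// Required equality test on the partition key, optional comparison on the sort key.
	out, err := client.Query(ctx, &dynamodb.QueryInput{
		TableName:              aws.String("Music"), // assumed schema: Artist (PK), SongTitle (SK)
		KeyConditionExpression: aws.String("Artist = :a AND SongTitle > :t"),
		ExpressionAttributeValues: map[string]types.AttributeValue{
			":a": &types.AttributeValueMemberS{Value: "No One You Know"},
			":t": &types.AttributeValueMemberS{Value: "C"},
		},
		ConsistentRead: aws.Bool(true), // valid on tables and LSIs only, never GSIs
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("matched %d of %d scanned items\n", out.Count, out.ScannedCount)
}
```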
+// +// [Paginating the Results]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.html#Query.Pagination +func (c *Client) Query(ctx context.Context, params *QueryInput, optFns ...func(*Options)) (*QueryOutput, error) { + if params == nil { + params = &QueryInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "Query", params, optFns, c.addOperationQueryMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*QueryOutput) + out.ResultMetadata = metadata + return out, nil +} + +// Represents the input of a Query operation. +type QueryInput struct { + + // The name of the table containing the requested items. You can also provide the + // Amazon Resource Name (ARN) of the table in this parameter. + // + // This member is required. + TableName *string + + // This is a legacy parameter. Use ProjectionExpression instead. For more + // information, see [AttributesToGet]in the Amazon DynamoDB Developer Guide. + // + // [AttributesToGet]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html + AttributesToGet []string + + // This is a legacy parameter. Use FilterExpression instead. For more information, + // see [ConditionalOperator]in the Amazon DynamoDB Developer Guide. + // + // [ConditionalOperator]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html + ConditionalOperator types.ConditionalOperator + + // Determines the read consistency model: If set to true , then the operation uses + // strongly consistent reads; otherwise, the operation uses eventually consistent + // reads. + // + // Strongly consistent reads are not supported on global secondary indexes. If you + // query a global secondary index with ConsistentRead set to true , you will + // receive a ValidationException . + ConsistentRead *bool + + // The primary key of the first item that this operation will evaluate. Use the + // value that was returned for LastEvaluatedKey in the previous operation. + // + // The data type for ExclusiveStartKey must be String, Number, or Binary. No set + // data types are allowed. + ExclusiveStartKey map[string]types.AttributeValue + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames : + // + // - To access an attribute whose name conflicts with a DynamoDB reserved word. + // + // - To create a placeholder for repeating occurrences of an attribute name in + // an expression. + // + // - To prevent special characters in an attribute name from being + // misinterpreted in an expression. + // + // Use the # character in an expression to dereference an attribute name. For + // example, consider the following attribute name: + // + // - Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be used + // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in the + // Amazon DynamoDB Developer Guide). To work around this, you could specify the + // following for ExpressionAttributeNames : + // + // - {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // - #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. 
+ // + // For more information on expression attribute names, see [Specifying Item Attributes] in the Amazon DynamoDB + // Developer Guide. + // + // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html + // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html + ExpressionAttributeNames map[string]string + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute value. + // For example, suppose that you wanted to check whether the value of the + // ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, + // ":disc":{"S":"Discontinued"} } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see [Specifying Conditions] in the Amazon + // DynamoDB Developer Guide. + // + // [Specifying Conditions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html + ExpressionAttributeValues map[string]types.AttributeValue + + // A string that contains conditions that DynamoDB applies after the Query + // operation, but before the data is returned to you. Items that do not satisfy the + // FilterExpression criteria are not returned. + // + // A FilterExpression does not allow key attributes. You cannot define a filter + // expression based on a partition key or a sort key. + // + // A FilterExpression is applied after the items have already been read; the + // process of filtering does not consume any additional read capacity units. + // + // For more information, see [Filter Expressions] in the Amazon DynamoDB Developer Guide. + // + // [Filter Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.FilterExpression.html + FilterExpression *string + + // The name of an index to query. This index can be any local secondary index or + // global secondary index on the table. Note that if you use the IndexName + // parameter, you must also provide TableName. + IndexName *string + + // The condition that specifies the key values for items to be retrieved by the + // Query action. + // + // The condition must perform an equality test on a single partition key value. + // + // The condition can optionally perform one of several comparison tests on a + // single sort key value. This allows Query to retrieve one item with a given + // partition key value and sort key value, or several items that have the same + // partition key value but different sort key values. + // + // The partition key equality test is required, and must be specified in the + // following format: + // + // partitionKeyName = :partitionkeyval + // + // If you also want to provide a condition for the sort key, it must be combined + // using AND with the condition for the partition key.
Following is an example, using + // the = comparison operator for the sort key: + // + // partitionKeyName = :partitionkeyval AND sortKeyName = :sortkeyval + // + // Valid comparisons for the sort key condition are as follows: + // + // - sortKeyName = :sortkeyval - true if the sort key value is equal to + // :sortkeyval . + // + // - sortKeyName < :sortkeyval - true if the sort key value is less than + // :sortkeyval . + // + // - sortKeyName <= :sortkeyval - true if the sort key value is less than or + // equal to :sortkeyval . + // + // - sortKeyName > :sortkeyval - true if the sort key value is greater than + // :sortkeyval . + // + // - sortKeyName >= :sortkeyval - true if the sort key value is greater than or + // equal to :sortkeyval . + // + // - sortKeyName BETWEEN :sortkeyval1 AND :sortkeyval2 - true if the sort key + // value is greater than or equal to :sortkeyval1 , and less than or equal to + // :sortkeyval2 . + // + // - begins_with ( sortKeyName , :sortkeyval ) - true if the sort key value + // begins with a particular operand. (You cannot use this function with a sort key + // that is of type Number.) Note that the function name begins_with is + // case-sensitive. + // + // Use the ExpressionAttributeValues parameter to replace tokens such as + // :partitionval and :sortval with actual values at runtime. + // + // You can optionally use the ExpressionAttributeNames parameter to replace the + // names of the partition key and sort key with placeholder tokens. This option + // might be necessary if an attribute name conflicts with a DynamoDB reserved word. + // For example, the following KeyConditionExpression parameter causes an error + // because Size is a reserved word: + // + // - Size = :myval + // + // To work around this, define a placeholder (such as #S ) to represent the + // attribute name Size. KeyConditionExpression then is as follows: + // + // - #S = :myval + // + // For a list of reserved words, see [Reserved Words] in the Amazon DynamoDB Developer Guide. + // + // For more information on ExpressionAttributeNames and ExpressionAttributeValues , + // see [Using Placeholders for Attribute Names and Values]in the Amazon DynamoDB Developer Guide. + // + // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html + // [Using Placeholders for Attribute Names and Values]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ExpressionPlaceholders.html + KeyConditionExpression *string + + // This is a legacy parameter. Use KeyConditionExpression instead. For more + // information, see [KeyConditions]in the Amazon DynamoDB Developer Guide. + // + // [KeyConditions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.KeyConditions.html + KeyConditions map[string]types.Condition + + // The maximum number of items to evaluate (not necessarily the number of matching + // items). If DynamoDB processes the number of items up to the limit while + // processing the results, it stops the operation and returns the matching values + // up to that point, and a key in LastEvaluatedKey to apply in a subsequent + // operation, so that you can pick up where you left off. Also, if the processed + // dataset size exceeds 1 MB before DynamoDB reaches this limit, it stops the + // operation and returns the matching values up to the limit, and a key in + // LastEvaluatedKey to apply in a subsequent operation to continue the operation.
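The sort-key comparators listed above compose with the required partition-key equality test; a hedged sketch (same imports and client as the earlier examples, illustrative table) using BETWEEN and a descending traversal:

```go
// songsBetween returns up to 25 items whose sort key falls in [lo, hi],
// walking the range in descending sort-key order.
func songsBetween(ctx context.Context, client *dynamodb.Client, lo, hi string) (*dynamodb.QueryOutput, error) {
	return client.Query(ctx, &dynamodb.QueryInput{
		TableName:              aws.String("Music"), // illustrative
		KeyConditionExpression: aws.String("Artist = :a AND SongTitle BETWEEN :lo AND :hi"),
		ExpressionAttributeValues: map[string]types.AttributeValue{
			":a":  &types.AttributeValueMemberS{Value: "No One You Know"},
			":lo": &types.AttributeValueMemberS{Value: lo},
			":hi": &types.AttributeValueMemberS{Value: hi},
		},
		ScanIndexForward: aws.Bool(false), // reverse sort-key order
		Limit:            aws.Int32(25),
	})
}
```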
+ // For more information, see [Query and Scan]in the Amazon DynamoDB Developer Guide. + // + // [Query and Scan]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html + Limit *int32 + + // A string that identifies one or more attributes to retrieve from the table. + // These attributes can include scalars, sets, or elements of a JSON document. The + // attributes in the expression must be separated by commas. + // + // If no attribute names are specified, then all attributes will be returned. If + // any of the requested attributes are not found, they will not appear in the + // result. + // + // For more information, see [Accessing Item Attributes] in the Amazon DynamoDB Developer Guide. + // + // [Accessing Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html + ProjectionExpression *string + + // This is a legacy parameter. Use FilterExpression instead. For more information, + // see [QueryFilter]in the Amazon DynamoDB Developer Guide. + // + // [QueryFilter]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.QueryFilter.html + QueryFilter map[string]types.Condition + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response: + // + // - INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // + // - TOTAL - The response includes only the aggregate ConsumedCapacity for the + // operation. + // + // - NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity types.ReturnConsumedCapacity + + // Specifies the order for index traversal: If true (default), the traversal is + // performed in ascending order; if false , the traversal is performed in + // descending order. + // + // Items with the same partition key value are stored in sorted order by sort key. + // If the sort key data type is Number, the results are stored in numeric order. + // For type String, the results are stored in order of UTF-8 bytes. For type + // Binary, DynamoDB treats each byte of the binary data as unsigned. + // + // If ScanIndexForward is true , DynamoDB returns the results in the order in which + // they are stored (by sort key value). This is the default behavior. If + // ScanIndexForward is false , DynamoDB reads the results in reverse order by sort + // key value, and then returns the results to the client. + ScanIndexForward *bool + + // The attributes to be returned in the result. You can retrieve all item + // attributes, specific item attributes, the count of matching items, or in the + // case of an index, some or all of the attributes projected into the index. + // + // - ALL_ATTRIBUTES - Returns all of the item attributes from the specified table + // or index. If you query a local secondary index, then for each matching item in + // the index, DynamoDB fetches the entire item from the parent table. If the index + // is configured to project all item attributes, then all of the data can be + // obtained from the local secondary index, and no fetching is required. 
+ // + // - ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves + // all attributes that have been projected into the index. If the index is + // configured to project all attributes, this return value is equivalent to + // specifying ALL_ATTRIBUTES . + // + // - COUNT - Returns the number of matching items, rather than the matching items + // themselves. Note that this uses the same quantity of read capacity units as + // getting the items, and is subject to the same item size calculations. + // + // - SPECIFIC_ATTRIBUTES - Returns only the attributes listed in + // ProjectionExpression . This return value is equivalent to specifying + // ProjectionExpression without specifying any value for Select . + // + // If you query or scan a local secondary index and request only attributes that + // are projected into that index, the operation will read only the index and not + // the table. If any of the requested attributes are not projected into the local + // secondary index, DynamoDB fetches each of these attributes from the parent + // table. This extra fetching incurs additional throughput cost and latency. + // + // If you query or scan a global secondary index, you can only request attributes + // that are projected into the index. Global secondary index queries cannot fetch + // attributes from the parent table. + // + // If neither Select nor ProjectionExpression are specified, DynamoDB defaults to + // ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when + // accessing an index. You cannot use both Select and ProjectionExpression + // together in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES + // . (This usage is equivalent to specifying ProjectionExpression without any + // value for Select .) + // + // If you use the ProjectionExpression parameter, then the value for Select can + // only be SPECIFIC_ATTRIBUTES . Any other value for Select will return an error. + Select types.Select + + noSmithyDocumentSerde +} + +func (in *QueryInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.TableName + +} + +// Represents the output of a Query operation. +type QueryOutput struct { + + // The capacity units consumed by the Query operation. The data returned includes + // the total provisioned throughput consumed, along with statistics for the table + // and any indexes involved in the operation. ConsumedCapacity is only returned if + // the ReturnConsumedCapacity parameter was specified. For more information, see [Capacity unit consumption for read operations] + // in the Amazon DynamoDB Developer Guide. + // + // [Capacity unit consumption for read operations]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#read-operation-consumption + ConsumedCapacity *types.ConsumedCapacity + + // The number of items in the response. + // + // If you used a QueryFilter in the request, then Count is the number of items + // returned after the filter was applied, and ScannedCount is the number of + // matching items before the filter was applied. + // + // If you did not use a filter in the request, then Count and ScannedCount are the + // same. + Count int32 + + // An array of item attributes that match the query criteria. Each element in this + // array consists of an attribute name and the value for that attribute. + Items []map[string]types.AttributeValue + + // The primary key of the item where the operation stopped, inclusive of the + // previous result set. 
Use this value to start a new operation, excluding this + // value in the new request. + // + // If LastEvaluatedKey is empty, then the "last page" of results has been + // processed and there is no more data to be retrieved. + // + // If LastEvaluatedKey is not empty, it does not necessarily mean that there is + // more data in the result set. The only way to know when you have reached the end + // of the result set is when LastEvaluatedKey is empty. + LastEvaluatedKey map[string]types.AttributeValue + + // The number of items evaluated, before any QueryFilter is applied. A high + // ScannedCount value with few, or no, Count results indicates an inefficient Query + // operation. For more information, see [Count and ScannedCount]in the Amazon DynamoDB Developer Guide. + // + // If you did not use a filter in the request, then ScannedCount is the same as + // Count . + // + // [Count and ScannedCount]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Count + ScannedCount int32 + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationQueryMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpQuery{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpQuery{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "Query"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpQueryDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpQueryValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opQuery(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return 
err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// QueryPaginatorOptions is the paginator options for Query +type QueryPaginatorOptions struct { + // The maximum number of items to evaluate (not necessarily the number of matching + // items). If DynamoDB processes the number of items up to the limit while + // processing the results, it stops the operation and returns the matching values + // up to that point, and a key in LastEvaluatedKey to apply in a subsequent + // operation, so that you can pick up where you left off. Also, if the processed + // dataset size exceeds 1 MB before DynamoDB reaches this limit, it stops the + // operation and returns the matching values up to the limit, and a key in + // LastEvaluatedKey to apply in a subsequent operation to continue the operation. + // For more information, see [Query and Scan]in the Amazon DynamoDB Developer Guide. 
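The QueryPaginator defined just below wraps the LastEvaluatedKey/ExclusiveStartKey handshake so callers never thread the continuation key by hand; a minimal consumer (same imports as the sketches above) could drain every page like this:

```go
// countAllItems sums Count across all result pages; NextPage feeds each
// LastEvaluatedKey back in as the next request's ExclusiveStartKey.
func countAllItems(ctx context.Context, client *dynamodb.Client, in *dynamodb.QueryInput) (int32, error) {
	var total int32
	p := dynamodb.NewQueryPaginator(client, in)
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return 0, err
		}
		total += page.Count
	}
	return total, nil
}
```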
+ // + // [Query and Scan]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html + Limit int32 +} + +// QueryPaginator is a paginator for Query +type QueryPaginator struct { + options QueryPaginatorOptions + client QueryAPIClient + params *QueryInput + nextToken map[string]types.AttributeValue + firstPage bool +} + +// NewQueryPaginator returns a new QueryPaginator +func NewQueryPaginator(client QueryAPIClient, params *QueryInput, optFns ...func(*QueryPaginatorOptions)) *QueryPaginator { + if params == nil { + params = &QueryInput{} + } + + options := QueryPaginatorOptions{} + if params.Limit != nil { + options.Limit = *params.Limit + } + + for _, fn := range optFns { + fn(&options) + } + + return &QueryPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.ExclusiveStartKey, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *QueryPaginator) HasMorePages() bool { + return p.firstPage || p.nextToken != nil +} + +// NextPage retrieves the next Query page. +func (p *QueryPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*QueryOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.ExclusiveStartKey = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.Limit = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.Query(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.LastEvaluatedKey + + _ = prevToken + + return result, nil +} + +func addOpQueryDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpQueryDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpQueryDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*QueryInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +// QueryAPIClient is a client that implements the Query operation. 
+type QueryAPIClient interface { + Query(context.Context, *QueryInput, ...func(*Options)) (*QueryOutput, error) +} + +var _ QueryAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opQuery(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "Query", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_RestoreTableFromBackup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_RestoreTableFromBackup.go new file mode 100644 index 0000000000..250d963882 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_RestoreTableFromBackup.go @@ -0,0 +1,298 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a new table from an existing backup. Any number of users can execute up +// to 50 concurrent restores (any type of restore) in a given account. +// +// You can call RestoreTableFromBackup at a maximum rate of 10 times per second. +// +// You must manually set up the following on the restored table: +// +// - Auto scaling policies +// +// - IAM policies +// +// - Amazon CloudWatch metrics and alarms +// +// - Tags +// +// - Stream settings +// +// - Time to Live (TTL) settings +func (c *Client) RestoreTableFromBackup(ctx context.Context, params *RestoreTableFromBackupInput, optFns ...func(*Options)) (*RestoreTableFromBackupOutput, error) { + if params == nil { + params = &RestoreTableFromBackupInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "RestoreTableFromBackup", params, optFns, c.addOperationRestoreTableFromBackupMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*RestoreTableFromBackupOutput) + out.ResultMetadata = metadata + return out, nil +} + +type RestoreTableFromBackupInput struct { + + // The Amazon Resource Name (ARN) associated with the backup. + // + // This member is required. + BackupArn *string + + // The name of the new table to which the backup must be restored. + // + // This member is required. + TargetTableName *string + + // The billing mode of the restored table. + BillingModeOverride types.BillingMode + + // List of global secondary indexes for the restored table. The indexes provided + // should match existing secondary indexes. You can choose to exclude some or all + // of the indexes at the time of restore. + GlobalSecondaryIndexOverride []types.GlobalSecondaryIndex + + // List of local secondary indexes for the restored table. The indexes provided + // should match existing secondary indexes. You can choose to exclude some or all + // of the indexes at the time of restore. + LocalSecondaryIndexOverride []types.LocalSecondaryIndex + + // Sets the maximum number of read and write units for the specified on-demand + // table. If you use this parameter, you must specify MaxReadRequestUnits , + // MaxWriteRequestUnits , or both. + OnDemandThroughputOverride *types.OnDemandThroughput + + // Provisioned throughput settings for the restored table. + ProvisionedThroughputOverride *types.ProvisionedThroughput + + // The new server-side encryption settings for the restored table. 
+ SSESpecificationOverride *types.SSESpecification + + noSmithyDocumentSerde +} + +func (in *RestoreTableFromBackupInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.TargetTableName + +} + +type RestoreTableFromBackupOutput struct { + + // The description of the table created from an existing backup. + TableDescription *types.TableDescription + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationRestoreTableFromBackupMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpRestoreTableFromBackup{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpRestoreTableFromBackup{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "RestoreTableFromBackup"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpRestoreTableFromBackupDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpRestoreTableFromBackupValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRestoreTableFromBackup(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = 
addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpRestoreTableFromBackupDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpRestoreTableFromBackupDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpRestoreTableFromBackupDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*RestoreTableFromBackupInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opRestoreTableFromBackup(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "RestoreTableFromBackup", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_RestoreTableToPointInTime.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_RestoreTableToPointInTime.go new file mode 100644 index 0000000000..380fcb85e9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_RestoreTableToPointInTime.go @@ -0,0 +1,330 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Restores the specified table to the specified point in time within +// EarliestRestorableDateTime and LatestRestorableDateTime . You can restore your +// table to any point in time in the last 35 days. You can set the recovery period +// to any value between 1 and 35 days. Any number of users can execute up to 50 +// concurrent restores (any type of restore) in a given account. +// +// When you restore using point in time recovery, DynamoDB restores your table +// data to the state based on the selected date and time (day:hour:minute:second) +// to a new table. +// +// Along with data, the following are also included on the new restored table +// using point in time recovery: +// +// - Global secondary indexes (GSIs) +// +// - Local secondary indexes (LSIs) +// +// - Provisioned read and write capacity +// +// - Encryption settings +// +// All these settings come from the current settings of the source table at the +// time of restore. +// +// You must manually set up the following on the restored table: +// +// - Auto scaling policies +// +// - IAM policies +// +// - Amazon CloudWatch metrics and alarms +// +// - Tags +// +// - Stream settings +// +// - Time to Live (TTL) settings +// +// - Point in time recovery settings +func (c *Client) RestoreTableToPointInTime(ctx context.Context, params *RestoreTableToPointInTimeInput, optFns ...func(*Options)) (*RestoreTableToPointInTimeOutput, error) { + if params == nil { + params = &RestoreTableToPointInTimeInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "RestoreTableToPointInTime", params, optFns, c.addOperationRestoreTableToPointInTimeMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*RestoreTableToPointInTimeOutput) + out.ResultMetadata = metadata + return out, nil +} + +type RestoreTableToPointInTimeInput struct { + + // The name of the new table to which it must be restored. + // + // This member is required. + TargetTableName *string + + // The billing mode of the restored table. + BillingModeOverride types.BillingMode + + // List of global secondary indexes for the restored table. The indexes provided + // should match existing secondary indexes. You can choose to exclude some or all + // of the indexes at the time of restore. + GlobalSecondaryIndexOverride []types.GlobalSecondaryIndex + + // List of local secondary indexes for the restored table. The indexes provided + // should match existing secondary indexes. You can choose to exclude some or all + // of the indexes at the time of restore. + LocalSecondaryIndexOverride []types.LocalSecondaryIndex + + // Sets the maximum number of read and write units for the specified on-demand + // table. If you use this parameter, you must specify MaxReadRequestUnits , + // MaxWriteRequestUnits , or both. + OnDemandThroughputOverride *types.OnDemandThroughput + + // Provisioned throughput settings for the restored table. + ProvisionedThroughputOverride *types.ProvisionedThroughput + + // Time in the past to restore the table to. + RestoreDateTime *time.Time + + // The new server-side encryption settings for the restored table.
+ SSESpecificationOverride *types.SSESpecification + + // The DynamoDB table that will be restored. This value is an Amazon Resource Name + // (ARN). + SourceTableArn *string + + // Name of the source table that is being restored. + SourceTableName *string + + // Restore the table to the latest possible time. LatestRestorableDateTime is + // typically 5 minutes before the current time. + UseLatestRestorableTime *bool + + noSmithyDocumentSerde +} + +func (in *RestoreTableToPointInTimeInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.TargetTableName + +} + +type RestoreTableToPointInTimeOutput struct { + + // Represents the properties of a table. + TableDescription *types.TableDescription + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationRestoreTableToPointInTimeMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpRestoreTableToPointInTime{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpRestoreTableToPointInTime{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "RestoreTableToPointInTime"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpRestoreTableToPointInTimeDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpRestoreTableToPointInTimeValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRestoreTableToPointInTime(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + 
} + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpRestoreTableToPointInTimeDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpRestoreTableToPointInTimeDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpRestoreTableToPointInTimeDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*RestoreTableToPointInTimeInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opRestoreTableToPointInTime(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "RestoreTableToPointInTime", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Scan.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Scan.go new file mode 
100644 index 0000000000..d3604bc75c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Scan.go @@ -0,0 +1,676 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// The Scan operation returns one or more items and item attributes by accessing +// every item in a table or a secondary index. To have DynamoDB return fewer items, +// you can provide a FilterExpression operation. +// +// If the total size of scanned items exceeds the maximum dataset size limit of 1 +// MB, the scan completes and results are returned to the user. The +// LastEvaluatedKey value is also returned and the requestor can use the +// LastEvaluatedKey to continue the scan in a subsequent operation. Each scan +// response also includes number of items that were scanned (ScannedCount) as part +// of the request. If using a FilterExpression , a scan result can result in no +// items meeting the criteria and the Count will result in zero. If you did not +// use a FilterExpression in the scan request, then Count is the same as +// ScannedCount . +// +// Count and ScannedCount only return the count of items specific to a single scan +// request and, unless the table is less than 1MB, do not represent the total +// number of items in the table. +// +// A single Scan operation first reads up to the maximum number of items set (if +// using the Limit parameter) or a maximum of 1 MB of data and then applies any +// filtering to the results if a FilterExpression is provided. If LastEvaluatedKey +// is present in the response, pagination is required to complete the full table +// scan. For more information, see [Paginating the Results]in the Amazon DynamoDB Developer Guide. +// +// Scan operations proceed sequentially; however, for faster performance on a +// large table or secondary index, applications can request a parallel Scan +// operation by providing the Segment and TotalSegments parameters. For more +// information, see [Parallel Scan]in the Amazon DynamoDB Developer Guide. +// +// By default, a Scan uses eventually consistent reads when accessing the items in +// a table. Therefore, the results from an eventually consistent Scan may not +// include the latest item changes at the time the scan iterates through each item +// in the table. If you require a strongly consistent read of each item as the scan +// iterates through the items in the table, you can set the ConsistentRead +// parameter to true. Strong consistency only relates to the consistency of the +// read at the item level. +// +// DynamoDB does not provide snapshot isolation for a scan operation when the +// ConsistentRead parameter is set to true. Thus, a DynamoDB scan operation does +// not guarantee that all reads in a scan see a consistent snapshot of the table +// when the scan operation was requested. 
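+//
+// A minimal usage sketch of the pagination flow described above. This is
+// illustrative and not part of the generated API surface: the table name
+// "MyTable" and the scanAllItems helper are hypothetical, and default
+// credential resolution is assumed. NewScanPaginator, HasMorePages, and
+// NextPage are the paginator entry points defined later in this file.
+//
+//	import (
+//	    "context"
+//
+//	    "github.com/aws/aws-sdk-go-v2/aws"
+//	    "github.com/aws/aws-sdk-go-v2/config"
+//	    "github.com/aws/aws-sdk-go-v2/service/dynamodb"
+//	)
+//
+//	// scanAllItems walks every page of a table scan via the paginator.
+//	func scanAllItems(ctx context.Context) error {
+//	    cfg, err := config.LoadDefaultConfig(ctx) // default credential chain
+//	    if err != nil {
+//	        return err
+//	    }
+//	    client := dynamodb.NewFromConfig(cfg)
+//	    paginator := dynamodb.NewScanPaginator(client, &dynamodb.ScanInput{
+//	        TableName: aws.String("MyTable"), // hypothetical table name
+//	    })
+//	    for paginator.HasMorePages() {
+//	        page, err := paginator.NextPage(ctx)
+//	        if err != nil {
+//	            return err
+//	        }
+//	        _ = page.Items // process this page's items here
+//	    }
+//	    return nil
+//	}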
+// +// [Paginating the Results]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Pagination +// [Parallel Scan]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.ParallelScan +func (c *Client) Scan(ctx context.Context, params *ScanInput, optFns ...func(*Options)) (*ScanOutput, error) { + if params == nil { + params = &ScanInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "Scan", params, optFns, c.addOperationScanMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ScanOutput) + out.ResultMetadata = metadata + return out, nil +} + +// Represents the input of a Scan operation. +type ScanInput struct { + + // The name of the table containing the requested items or if you provide IndexName + // , the name of the table to which that index belongs. + // + // You can also provide the Amazon Resource Name (ARN) of the table in this + // parameter. + // + // This member is required. + TableName *string + + // This is a legacy parameter. Use ProjectionExpression instead. For more + // information, see [AttributesToGet]in the Amazon DynamoDB Developer Guide. + // + // [AttributesToGet]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html + AttributesToGet []string + + // This is a legacy parameter. Use FilterExpression instead. For more information, + // see [ConditionalOperator]in the Amazon DynamoDB Developer Guide. + // + // [ConditionalOperator]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html + ConditionalOperator types.ConditionalOperator + + // A Boolean value that determines the read consistency model during the scan: + // + // - If ConsistentRead is false , then the data returned from Scan might not + // contain the results from other recently completed write operations ( PutItem , + // UpdateItem , or DeleteItem ). + // + // - If ConsistentRead is true , then all of the write operations that completed + // before the Scan began are guaranteed to be contained in the Scan response. + // + // The default setting for ConsistentRead is false . + // + // The ConsistentRead parameter is not supported on global secondary indexes. If + // you scan a global secondary index with ConsistentRead set to true, you will + // receive a ValidationException . + ConsistentRead *bool + + // The primary key of the first item that this operation will evaluate. Use the + // value that was returned for LastEvaluatedKey in the previous operation. + // + // The data type for ExclusiveStartKey must be String, Number or Binary. No set + // data types are allowed. + // + // In a parallel scan, a Scan request that includes ExclusiveStartKey must specify + // the same segment whose previous Scan returned the corresponding value of + // LastEvaluatedKey . + ExclusiveStartKey map[string]types.AttributeValue + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames : + // + // - To access an attribute whose name conflicts with a DynamoDB reserved word. + // + // - To create a placeholder for repeating occurrences of an attribute name in + // an expression. + // + // - To prevent special characters in an attribute name from being + // misinterpreted in an expression. + // + // Use the # character in an expression to dereference an attribute name. 
For + // example, consider the following attribute name: + // + // - Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be used + // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in the + // Amazon DynamoDB Developer Guide). To work around this, you could specify the + // following for ExpressionAttributeNames : + // + // - {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // - #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see [Specifying Item Attributes] in the Amazon DynamoDB + // Developer Guide. + // + // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html + // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html + ExpressionAttributeNames map[string]string + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute value. + // For example, suppose that you wanted to check whether the value of the + // ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, + // ":disc":{"S":"Discontinued"} } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see [Condition Expressions] in the Amazon + // DynamoDB Developer Guide. + // + // [Condition Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html + ExpressionAttributeValues map[string]types.AttributeValue + + // A string that contains conditions that DynamoDB applies after the Scan + // operation, but before the data is returned to you. Items that do not satisfy the + // FilterExpression criteria are not returned. + // + // A FilterExpression is applied after the items have already been read; the + // process of filtering does not consume any additional read capacity units. + // + // For more information, see [Filter Expressions] in the Amazon DynamoDB Developer Guide. + // + // [Filter Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.FilterExpression + FilterExpression *string + + // The name of a secondary index to scan. This index can be any local secondary + // index or global secondary index. Note that if you use the IndexName parameter, + // you must also provide TableName . + IndexName *string + + // The maximum number of items to evaluate (not necessarily the number of matching + // items). If DynamoDB processes the number of items up to the limit while + // processing the results, it stops the operation and returns the matching values + // up to that point, and a key in LastEvaluatedKey to apply in a subsequent + // operation, so that you can pick up where you left off. 
Also, if the processed + // dataset size exceeds 1 MB before DynamoDB reaches this limit, it stops the + // operation and returns the matching values up to the limit, and a key in + // LastEvaluatedKey to apply in a subsequent operation to continue the operation. + // For more information, see [Working with Queries]in the Amazon DynamoDB Developer Guide. + // + // [Working with Queries]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html + Limit *int32 + + // A string that identifies one or more attributes to retrieve from the specified + // table or index. These attributes can include scalars, sets, or elements of a + // JSON document. The attributes in the expression must be separated by commas. + // + // If no attribute names are specified, then all attributes will be returned. If + // any of the requested attributes are not found, they will not appear in the + // result. + // + // For more information, see [Specifying Item Attributes] in the Amazon DynamoDB Developer Guide. + // + // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html + ProjectionExpression *string + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response: + // + // - INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // + // - TOTAL - The response includes only the aggregate ConsumedCapacity for the + // operation. + // + // - NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity types.ReturnConsumedCapacity + + // This is a legacy parameter. Use FilterExpression instead. For more information, + // see [ScanFilter]in the Amazon DynamoDB Developer Guide. + // + // [ScanFilter]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ScanFilter.html + ScanFilter map[string]types.Condition + + // For a parallel Scan request, Segment identifies an individual segment to be + // scanned by an application worker. + // + // Segment IDs are zero-based, so the first segment is always 0. For example, if + // you want to use four application threads to scan a table or an index, then the + // first thread specifies a Segment value of 0, the second thread specifies 1, and + // so on. + // + // The value of LastEvaluatedKey returned from a parallel Scan request must be + // used as ExclusiveStartKey with the same segment ID in a subsequent Scan + // operation. + // + // The value for Segment must be greater than or equal to 0, and less than the + // value provided for TotalSegments . + // + // If you provide Segment , you must also provide TotalSegments . + Segment *int32 + + // The attributes to be returned in the result. You can retrieve all item + // attributes, specific item attributes, the count of matching items, or in the + // case of an index, some or all of the attributes projected into the index. + // + // - ALL_ATTRIBUTES - Returns all of the item attributes from the specified table + // or index. 
If you query a local secondary index, then for each matching item in + // the index, DynamoDB fetches the entire item from the parent table. If the index + // is configured to project all item attributes, then all of the data can be + // obtained from the local secondary index, and no fetching is required. + // + // - ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves + // all attributes that have been projected into the index. If the index is + // configured to project all attributes, this return value is equivalent to + // specifying ALL_ATTRIBUTES . + // + // - COUNT - Returns the number of matching items, rather than the matching items + // themselves. Note that this uses the same quantity of read capacity units as + // getting the items, and is subject to the same item size calculations. + // + // - SPECIFIC_ATTRIBUTES - Returns only the attributes listed in + // ProjectionExpression . This return value is equivalent to specifying + // ProjectionExpression without specifying any value for Select . + // + // If you query or scan a local secondary index and request only attributes that + // are projected into that index, the operation reads only the index and not the + // table. If any of the requested attributes are not projected into the local + // secondary index, DynamoDB fetches each of these attributes from the parent + // table. This extra fetching incurs additional throughput cost and latency. + // + // If you query or scan a global secondary index, you can only request attributes + // that are projected into the index. Global secondary index queries cannot fetch + // attributes from the parent table. + // + // If neither Select nor ProjectionExpression are specified, DynamoDB defaults to + // ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when + // accessing an index. You cannot use both Select and ProjectionExpression + // together in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES + // . (This usage is equivalent to specifying ProjectionExpression without any + // value for Select .) + // + // If you use the ProjectionExpression parameter, then the value for Select can + // only be SPECIFIC_ATTRIBUTES . Any other value for Select will return an error. + Select types.Select + + // For a parallel Scan request, TotalSegments represents the total number of + // segments into which the Scan operation will be divided. The value of + // TotalSegments corresponds to the number of application workers that will perform + // the parallel scan. For example, if you want to use four application threads to + // scan a table or an index, specify a TotalSegments value of 4. + // + // The value for TotalSegments must be greater than or equal to 1, and less than + // or equal to 1000000. If you specify a TotalSegments value of 1, the Scan + // operation will be sequential rather than parallel. + // + // If you specify TotalSegments , you must also specify Segment . + TotalSegments *int32 + + noSmithyDocumentSerde +} + +func (in *ScanInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.TableName + +} + +// Represents the output of a Scan operation. +type ScanOutput struct { + + // The capacity units consumed by the Scan operation. The data returned includes + // the total provisioned throughput consumed, along with statistics for the table + // and any indexes involved in the operation. ConsumedCapacity is only returned if + // the ReturnConsumedCapacity parameter was specified. 
For more information, see [Capacity unit consumption for read operations] + // in the Amazon DynamoDB Developer Guide. + // + // [Capacity unit consumption for read operations]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#read-operation-consumption + ConsumedCapacity *types.ConsumedCapacity + + // The number of items in the response. + // + // If you set ScanFilter in the request, then Count is the number of items + // returned after the filter was applied, and ScannedCount is the number of + // matching items before the filter was applied. + // + // If you did not use a filter in the request, then Count is the same as + // ScannedCount . + Count int32 + + // An array of item attributes that match the scan criteria. Each element in this + // array consists of an attribute name and the value for that attribute. + Items []map[string]types.AttributeValue + + // The primary key of the item where the operation stopped, inclusive of the + // previous result set. Use this value to start a new operation, excluding this + // value in the new request. + // + // If LastEvaluatedKey is empty, then the "last page" of results has been + // processed and there is no more data to be retrieved. + // + // If LastEvaluatedKey is not empty, it does not necessarily mean that there is + // more data in the result set. The only way to know when you have reached the end + // of the result set is when LastEvaluatedKey is empty. + LastEvaluatedKey map[string]types.AttributeValue + + // The number of items evaluated, before any ScanFilter is applied. A high + // ScannedCount value with few, or no, Count results indicates an inefficient Scan + // operation. For more information, see [Count and ScannedCount]in the Amazon DynamoDB Developer Guide. + // + // If you did not use a filter in the request, then ScannedCount is the same as + // Count . + // + // [Count and ScannedCount]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Count + ScannedCount int32 + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationScanMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpScan{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpScan{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "Scan"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpScanDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpScanValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opScan(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return 
err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ScanPaginatorOptions is the paginator options for Scan +type ScanPaginatorOptions struct { + // The maximum number of items to evaluate (not necessarily the number of matching + // items). If DynamoDB processes the number of items up to the limit while + // processing the results, it stops the operation and returns the matching values + // up to that point, and a key in LastEvaluatedKey to apply in a subsequent + // operation, so that you can pick up where you left off. Also, if the processed + // dataset size exceeds 1 MB before DynamoDB reaches this limit, it stops the + // operation and returns the matching values up to the limit, and a key in + // LastEvaluatedKey to apply in a subsequent operation to continue the operation. + // For more information, see [Working with Queries]in the Amazon DynamoDB Developer Guide. + // + // [Working with Queries]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html + Limit int32 +} + +// ScanPaginator is a paginator for Scan +type ScanPaginator struct { + options ScanPaginatorOptions + client ScanAPIClient + params *ScanInput + nextToken map[string]types.AttributeValue + firstPage bool +} + +// NewScanPaginator returns a new ScanPaginator +func NewScanPaginator(client ScanAPIClient, params *ScanInput, optFns ...func(*ScanPaginatorOptions)) *ScanPaginator { + if params == nil { + params = &ScanInput{} + } + + options := ScanPaginatorOptions{} + if params.Limit != nil { + options.Limit = *params.Limit + } + + for _, fn := range optFns { + fn(&options) + } + + return &ScanPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.ExclusiveStartKey, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ScanPaginator) HasMorePages() bool { + return p.firstPage || p.nextToken != nil +} + +// NextPage retrieves the next Scan page. +func (p *ScanPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ScanOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.ExclusiveStartKey = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.Limit = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.Scan(ctx, ¶ms, optFns...) 
+ if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.LastEvaluatedKey + + _ = prevToken + + return result, nil +} + +func addOpScanDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpScanDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpScanDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*ScanInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +// ScanAPIClient is a client that implements the Scan operation. +type ScanAPIClient interface { + Scan(context.Context, *ScanInput, ...func(*Options)) (*ScanOutput, error) +} + +var _ ScanAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opScan(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "Scan", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TagResource.go new file mode 100644 index 0000000000..f80afd79e9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TagResource.go @@ -0,0 +1,273 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Associate a set of tags with an Amazon DynamoDB resource. You can then activate +// these user-defined tags so that they appear on the Billing and Cost Management +// console for cost allocation tracking. You can call TagResource up to five times +// per second, per account. +// +// - TagResource is an asynchronous operation. If you issue a ListTagsOfResourcerequest +// immediately after a TagResource request, DynamoDB might return your previous +// tag set, if there was one, or an empty tag set. 
This is because +// ListTagsOfResource uses an eventually consistent query, and the metadata for +// your tags or table might not be available at that moment. Wait for a few +// seconds, and then try the ListTagsOfResource request again. +// +// - The application or removal of tags using TagResource and UntagResource APIs +// is eventually consistent. ListTagsOfResource API will only reflect the changes +// after a few seconds. +// +// For an overview on tagging DynamoDB resources, see [Tagging for DynamoDB] in the Amazon DynamoDB +// Developer Guide. +// +// [Tagging for DynamoDB]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html +func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) { + if params == nil { + params = &TagResourceInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "TagResource", params, optFns, c.addOperationTagResourceMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*TagResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type TagResourceInput struct { + + // Identifies the Amazon DynamoDB resource to which tags should be added. This + // value is an Amazon Resource Name (ARN). + // + // This member is required. + ResourceArn *string + + // The tags to be assigned to the Amazon DynamoDB resource. + // + // This member is required. + Tags []types.Tag + + noSmithyDocumentSerde +} + +func (in *TagResourceInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.ResourceArn + +} + +type TagResourceOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpTagResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpTagResource{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "TagResource"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpTagResourceDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = 
addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpTagResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opTagResource(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpTagResourceDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpTagResourceDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpTagResourceDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*TagResourceInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := 
c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opTagResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "TagResource", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TransactGetItems.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TransactGetItems.go new file mode 100644 index 0000000000..ce6db313c8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TransactGetItems.go @@ -0,0 +1,303 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// TransactGetItems is a synchronous operation that atomically retrieves multiple +// items from one or more tables (but not from indexes) in a single account and +// Region. A TransactGetItems call can contain up to 100 TransactGetItem objects, +// each of which contains a Get structure that specifies an item to retrieve from +// a table in the account and Region. A call to TransactGetItems cannot retrieve +// items from tables in more than one Amazon Web Services account or Region. The +// aggregate size of the items in the transaction cannot exceed 4 MB. +// +// DynamoDB rejects the entire TransactGetItems request if any of the following is +// true: +// +// - A conflicting operation is in the process of updating an item to be read. +// +// - There is insufficient provisioned capacity for the transaction to be +// completed. +// +// - There is a user error, such as an invalid data format. +// +// - The aggregate size of the items in the transaction exceeded 4 MB. +func (c *Client) TransactGetItems(ctx context.Context, params *TransactGetItemsInput, optFns ...func(*Options)) (*TransactGetItemsOutput, error) { + if params == nil { + params = &TransactGetItemsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "TransactGetItems", params, optFns, c.addOperationTransactGetItemsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*TransactGetItemsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type TransactGetItemsInput struct { + + // An ordered array of up to 100 TransactGetItem objects, each of which contains a + // Get structure. + // + // This member is required. + TransactItems []types.TransactGetItem + + // A value of TOTAL causes consumed capacity information to be returned, and a + // value of NONE prevents that information from being returned. No other value is + // valid. 
+ ReturnConsumedCapacity types.ReturnConsumedCapacity + + noSmithyDocumentSerde +} + +func (in *TransactGetItemsInput) bindEndpointParams(p *EndpointParameters) { + func() { + v1 := in.TransactItems + var v2 []string + for _, v := range v1 { + v3 := v.Get + var v4 *string + if v3 != nil { + v5 := v3.TableName + v4 = v5 + } + if v4 != nil { + v2 = append(v2, *v4) + } + } + p.ResourceArnList = v2 + }() + +} + +type TransactGetItemsOutput struct { + + // If the ReturnConsumedCapacity value was TOTAL , this is an array of + // ConsumedCapacity objects, one for each table addressed by TransactGetItem + // objects in the TransactItems parameter. These ConsumedCapacity objects report + // the read-capacity units consumed by the TransactGetItems call in that table. + ConsumedCapacity []types.ConsumedCapacity + + // An ordered array of up to 100 ItemResponse objects, each of which corresponds + // to the TransactGetItem object in the same position in the TransactItems array. + // Each ItemResponse object contains a Map of the name-value pairs that are the + // projected attributes of the requested item. + // + // If a requested item could not be retrieved, the corresponding ItemResponse + // object is Null, or if the requested item has no projected attributes, the + // corresponding ItemResponse object is an empty Map. + Responses []types.ItemResponse + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationTransactGetItemsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpTransactGetItems{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpTransactGetItems{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "TransactGetItems"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpTransactGetItemsDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, 
options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpTransactGetItemsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opTransactGetItems(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpTransactGetItemsDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpTransactGetItemsDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpTransactGetItemsDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*TransactGetItemsInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go 
c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opTransactGetItems(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "TransactGetItems", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TransactWriteItems.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TransactWriteItems.go new file mode 100644 index 0000000000..a345302a81 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TransactWriteItems.go @@ -0,0 +1,441 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// TransactWriteItems is a synchronous write operation that groups up to 100 +// action requests. These actions can target items in different tables, but not in +// different Amazon Web Services accounts or Regions, and no two actions can target +// the same item. For example, you cannot both ConditionCheck and Update the same +// item. The aggregate size of the items in the transaction cannot exceed 4 MB. +// +// The actions are completed atomically so that either all of them succeed, or all +// of them fail. They are defined by the following objects: +// +// - Put — Initiates a PutItem operation to write a new item. This structure +// specifies the primary key of the item to be written, the name of the table to +// write it in, an optional condition expression that must be satisfied for the +// write to succeed, a list of the item's attributes, and a field indicating +// whether to retrieve the item's attributes if the condition is not met. +// +// - Update — Initiates an UpdateItem operation to update an existing item. This +// structure specifies the primary key of the item to be updated, the name of the +// table where it resides, an optional condition expression that must be satisfied +// for the update to succeed, an expression that defines one or more attributes to +// be updated, and a field indicating whether to retrieve the item's attributes if +// the condition is not met. +// +// - Delete — Initiates a DeleteItem operation to delete an existing item. This +// structure specifies the primary key of the item to be deleted, the name of the +// table where it resides, an optional condition expression that must be satisfied +// for the deletion to succeed, and a field indicating whether to retrieve the +// item's attributes if the condition is not met. +// +// - ConditionCheck — Applies a condition to an item that is not being modified +// by the transaction. This structure specifies the primary key of the item to be +// checked, the name of the table where it resides, a condition expression that +// must be satisfied for the transaction to succeed, and a field indicating whether +// to retrieve the item's attributes if the condition is not met. +// +// DynamoDB rejects the entire TransactWriteItems request if any of the following +// is true: +// +// - A condition in one of the condition expressions is not met. 
+// +// - An ongoing operation is in the process of updating the same item. +// +// - There is insufficient provisioned capacity for the transaction to be +// completed. +// +// - An item size becomes too large (bigger than 400 KB), a local secondary +// index (LSI) becomes too large, or a similar validation error occurs because of +// changes made by the transaction. +// +// - The aggregate size of the items in the transaction exceeds 4 MB. +// +// - There is a user error, such as an invalid data format. +func (c *Client) TransactWriteItems(ctx context.Context, params *TransactWriteItemsInput, optFns ...func(*Options)) (*TransactWriteItemsOutput, error) { + if params == nil { + params = &TransactWriteItemsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "TransactWriteItems", params, optFns, c.addOperationTransactWriteItemsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*TransactWriteItemsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type TransactWriteItemsInput struct { + + // An ordered array of up to 100 TransactWriteItem objects, each of which contains + // a ConditionCheck , Put , Update , or Delete object. These can operate on items + // in different tables, but the tables must reside in the same Amazon Web Services + // account and Region, and no two of them can operate on the same item. + // + // This member is required. + TransactItems []types.TransactWriteItem + + // Providing a ClientRequestToken makes the call to TransactWriteItems idempotent, + // meaning that multiple identical calls have the same effect as one single call. + // + // Although multiple identical calls using the same client request token produce + // the same result on the server (no side effects), the responses to the calls + // might not be the same. If the ReturnConsumedCapacity parameter is set, then the + // initial TransactWriteItems call returns the amount of write capacity units + // consumed in making the changes. Subsequent TransactWriteItems calls with the + // same client token return the number of read capacity units consumed in reading + // the item. + // + // A client request token is valid for 10 minutes after the first request that + // uses it is completed. After 10 minutes, any request with the same client token + // is treated as a new request. Do not resubmit the same request with the same + // client token for more than 10 minutes, or the result might not be idempotent. + // + // If you submit a request with the same client token but a change in other + // parameters within the 10-minute idempotency window, DynamoDB returns an + // IdempotentParameterMismatch exception. + ClientRequestToken *string + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response: + // + // - INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // + // - TOTAL - The response includes only the aggregate ConsumedCapacity for the + // operation. + // + // - NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity types.ReturnConsumedCapacity + + // Determines whether item collection metrics are returned. 
If set to SIZE , the + // response includes statistics about item collections (if any), that were modified + // during the operation and are returned in the response. If set to NONE (the + // default), no statistics are returned. + ReturnItemCollectionMetrics types.ReturnItemCollectionMetrics + + noSmithyDocumentSerde +} + +func (in *TransactWriteItemsInput) bindEndpointParams(p *EndpointParameters) { + func() { + v1 := in.TransactItems + var v2 [][]string + for _, v := range v1 { + v3 := v.ConditionCheck + var v4 *string + if v3 != nil { + v5 := v3.TableName + v4 = v5 + } + v6 := v.Put + var v7 *string + if v6 != nil { + v8 := v6.TableName + v7 = v8 + } + v9 := v.Delete + var v10 *string + if v9 != nil { + v11 := v9.TableName + v10 = v11 + } + v12 := v.Update + var v13 *string + if v12 != nil { + v14 := v12.TableName + v13 = v14 + } + v15 := []string{} + if v4 != nil { + v15 = append(v15, *v4) + } + if v7 != nil { + v15 = append(v15, *v7) + } + if v10 != nil { + v15 = append(v15, *v10) + } + if v13 != nil { + v15 = append(v15, *v13) + } + if v15 != nil { + v2 = append(v2, v15) + } + } + var v16 []string + for _, v := range v2 { + v16 = append(v16, v...) + } + p.ResourceArnList = v16 + }() + +} + +type TransactWriteItemsOutput struct { + + // The capacity units consumed by the entire TransactWriteItems operation. The + // values of the list are ordered according to the ordering of the TransactItems + // request parameter. + ConsumedCapacity []types.ConsumedCapacity + + // A list of tables that were processed by TransactWriteItems and, for each table, + // information about any item collections that were affected by individual + // UpdateItem , PutItem , or DeleteItem operations. + ItemCollectionMetrics map[string][]types.ItemCollectionMetrics + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationTransactWriteItemsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpTransactWriteItems{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpTransactWriteItems{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "TransactWriteItems"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpTransactWriteItemsDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addIdempotencyToken_opTransactWriteItemsMiddleware(stack, options); err != nil { + return err + } + if err = addOpTransactWriteItemsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opTransactWriteItems(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = 
addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpTransactWriteItemsDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpTransactWriteItemsDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpTransactWriteItemsDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*TransactWriteItemsInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +type idempotencyToken_initializeOpTransactWriteItems struct { + tokenProvider IdempotencyTokenProvider +} + +func (*idempotencyToken_initializeOpTransactWriteItems) ID() string { + return "OperationIdempotencyTokenAutoFill" +} + +func (m *idempotencyToken_initializeOpTransactWriteItems) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.tokenProvider == nil { + return next.HandleInitialize(ctx, in) + } + + input, ok := in.Parameters.(*TransactWriteItemsInput) + if !ok { + return out, metadata, fmt.Errorf("expected middleware input to be of type *TransactWriteItemsInput ") + } + + if input.ClientRequestToken == nil { + t, err := m.tokenProvider.GetIdempotencyToken() + if err != nil { + return out, metadata, err + } + input.ClientRequestToken = &t + } + return next.HandleInitialize(ctx, in) +} +func addIdempotencyToken_opTransactWriteItemsMiddleware(stack *middleware.Stack, cfg Options) error { + return 
stack.Initialize.Add(&idempotencyToken_initializeOpTransactWriteItems{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before) +} + +func newServiceMetadataMiddleware_opTransactWriteItems(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "TransactWriteItems", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UntagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UntagResource.go new file mode 100644 index 0000000000..c481bb262d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UntagResource.go @@ -0,0 +1,271 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Removes the association of tags from an Amazon DynamoDB resource. You can call +// UntagResource up to five times per second, per account. +// +// - UntagResource is an asynchronous operation. If you issue a ListTagsOfResourcerequest +// immediately after an UntagResource request, DynamoDB might return your +// previous tag set, if there was one, or an empty tag set. This is because +// ListTagsOfResource uses an eventually consistent query, and the metadata for +// your tags or table might not be available at that moment. Wait for a few +// seconds, and then try the ListTagsOfResource request again. +// +// - The application or removal of tags using TagResource and UntagResource APIs +// is eventually consistent. ListTagsOfResource API will only reflect the changes +// after a few seconds. +// +// For an overview on tagging DynamoDB resources, see [Tagging for DynamoDB] in the Amazon DynamoDB +// Developer Guide. +// +// [Tagging for DynamoDB]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html +func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, optFns ...func(*Options)) (*UntagResourceOutput, error) { + if params == nil { + params = &UntagResourceInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UntagResource", params, optFns, c.addOperationUntagResourceMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UntagResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UntagResourceInput struct { + + // The DynamoDB resource that the tags will be removed from. This value is an + // Amazon Resource Name (ARN). + // + // This member is required. + ResourceArn *string + + // A list of tag keys. Existing tags of the resource whose keys are members of + // this list will be removed from the DynamoDB resource. + // + // This member is required. + TagKeys []string + + noSmithyDocumentSerde +} + +func (in *UntagResourceInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.ResourceArn + +} + +type UntagResourceOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpUntagResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpUntagResource{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UntagResource"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUntagResourceDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpUntagResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUntagResource(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = 
addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpUntagResourceDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpUntagResourceDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpUntagResourceDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*UntagResourceInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opUntagResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UntagResource", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContinuousBackups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContinuousBackups.go new file mode 100644 index 0000000000..4d9dcb86a7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContinuousBackups.go @@ -0,0 +1,271 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// UpdateContinuousBackups enables or disables point in time recovery for the +// specified table. A successful UpdateContinuousBackups call returns the current +// ContinuousBackupsDescription . Continuous backups are ENABLED on all tables at +// table creation. 
If point in time recovery is enabled, PointInTimeRecoveryStatus +// will be set to ENABLED. +// +// Once continuous backups and point in time recovery are enabled, you can restore +// to any point in time within EarliestRestorableDateTime and +// LatestRestorableDateTime . +// +// LatestRestorableDateTime is typically 5 minutes before the current time. You +// can restore your table to any point in time in the last 35 days. You can set the +// RecoveryPeriodInDays to any value between 1 and 35 days. +func (c *Client) UpdateContinuousBackups(ctx context.Context, params *UpdateContinuousBackupsInput, optFns ...func(*Options)) (*UpdateContinuousBackupsOutput, error) { + if params == nil { + params = &UpdateContinuousBackupsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateContinuousBackups", params, optFns, c.addOperationUpdateContinuousBackupsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateContinuousBackupsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateContinuousBackupsInput struct { + + // Represents the settings used to enable point in time recovery. + // + // This member is required. + PointInTimeRecoverySpecification *types.PointInTimeRecoverySpecification + + // The name of the table. You can also provide the Amazon Resource Name (ARN) of + // the table in this parameter. + // + // This member is required. + TableName *string + + noSmithyDocumentSerde +} + +func (in *UpdateContinuousBackupsInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.TableName + +} + +type UpdateContinuousBackupsOutput struct { + + // Represents the continuous backups and point in time recovery settings on the + // table. + ContinuousBackupsDescription *types.ContinuousBackupsDescription + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateContinuousBackupsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpUpdateContinuousBackups{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpUpdateContinuousBackups{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateContinuousBackups"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUpdateContinuousBackupsDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpUpdateContinuousBackupsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateContinuousBackups(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = 
addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpUpdateContinuousBackupsDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpUpdateContinuousBackupsDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpUpdateContinuousBackupsDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*UpdateContinuousBackupsInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opUpdateContinuousBackups(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateContinuousBackups", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContributorInsights.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContributorInsights.go new file mode 100644 index 0000000000..edb045481f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContributorInsights.go @@ -0,0 +1,235 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Updates the status for contributor insights for a specific table or index. 
+// CloudWatch Contributor Insights for DynamoDB graphs display the partition key +// and (if applicable) sort key of frequently accessed items and frequently +// throttled items in plaintext. If you require the use of Amazon Web Services Key +// Management Service (KMS) to encrypt this table’s partition key and sort key data +// with an Amazon Web Services managed key or customer managed key, you should not +// enable CloudWatch Contributor Insights for DynamoDB for this table. +func (c *Client) UpdateContributorInsights(ctx context.Context, params *UpdateContributorInsightsInput, optFns ...func(*Options)) (*UpdateContributorInsightsOutput, error) { + if params == nil { + params = &UpdateContributorInsightsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateContributorInsights", params, optFns, c.addOperationUpdateContributorInsightsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateContributorInsightsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateContributorInsightsInput struct { + + // Represents the contributor insights action. + // + // This member is required. + ContributorInsightsAction types.ContributorInsightsAction + + // The name of the table. You can also provide the Amazon Resource Name (ARN) of + // the table in this parameter. + // + // This member is required. + TableName *string + + // Specifies whether to track all access and throttled events or throttled events + // only for the DynamoDB table or index. + ContributorInsightsMode types.ContributorInsightsMode + + // The global secondary index name, if applicable. + IndexName *string + + noSmithyDocumentSerde +} + +func (in *UpdateContributorInsightsInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.TableName + +} + +type UpdateContributorInsightsOutput struct { + + // The updated mode of CloudWatch Contributor Insights that determines whether to + // monitor all access and throttled events or to track throttled events + // exclusively. + ContributorInsightsMode types.ContributorInsightsMode + + // The status of contributor insights + ContributorInsightsStatus types.ContributorInsightsStatus + + // The name of the global secondary index, if applicable. + IndexName *string + + // The name of the table. + TableName *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateContributorInsightsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpUpdateContributorInsights{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpUpdateContributorInsights{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateContributorInsights"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpUpdateContributorInsightsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateContributorInsights(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, 
options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateContributorInsights(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateContributorInsights", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateGlobalTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateGlobalTable.go new file mode 100644 index 0000000000..25878256fc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateGlobalTable.go @@ -0,0 +1,292 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Adds or removes replicas in the specified global table. The global table must +// already exist to be able to use this operation. Any replica to be added must be +// empty, have the same name as the global table, have the same key schema, have +// DynamoDB Streams enabled, and have the same provisioned and maximum write +// capacity units. +// +// This documentation is for version 2017.11.29 (Legacy) of global tables, which +// should be avoided for new global tables. Customers should use [Global Tables version 2019.11.21 (Current)]when possible, +// because it provides greater flexibility, higher efficiency, and consumes less +// write capacity than 2017.11.29 (Legacy). +// +// To determine which version you're using, see [Determining the global table version you are using]. To update existing global tables +// from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see [Upgrading global tables]. +// +// If you are using global tables [Version 2019.11.21] (Current) you can use [UpdateTable] instead. +// +// Although you can use UpdateGlobalTable to add replicas and remove replicas in a +// single request, for simplicity we recommend that you issue separate requests for +// adding or removing replicas. +// +// If global secondary indexes are specified, then the following conditions must +// also be met: +// +// - The global secondary indexes must have the same name. +// +// - The global secondary indexes must have the same hash key and sort key (if +// present). +// +// - The global secondary indexes must have the same provisioned and maximum +// write capacity units. 
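For illustration only, a minimal sketch of invoking this legacy operation to add one replica Region, assuming a client built with config.LoadDefaultConfig; the table name "Music" and the target Region are placeholders, not values taken from this file:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	// Add a replica in us-west-2 to an existing 2017.11.29 (Legacy) global
	// table. Per the conditions above, the replica table must already exist,
	// be empty, have the same name and key schema, have DynamoDB Streams
	// enabled, and match the provisioned write capacity.
	_, err = client.UpdateGlobalTable(context.TODO(), &dynamodb.UpdateGlobalTableInput{
		GlobalTableName: aws.String("Music"), // placeholder table name
		ReplicaUpdates: []types.ReplicaUpdate{
			{Create: &types.CreateReplicaAction{RegionName: aws.String("us-west-2")}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}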
+// +// [UpdateTable]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateTable.html +// [Global Tables version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html +// [Upgrading global tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html +// [Version 2019.11.21]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html +// [Determining the global table version you are using]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html +func (c *Client) UpdateGlobalTable(ctx context.Context, params *UpdateGlobalTableInput, optFns ...func(*Options)) (*UpdateGlobalTableOutput, error) { + if params == nil { + params = &UpdateGlobalTableInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateGlobalTable", params, optFns, c.addOperationUpdateGlobalTableMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateGlobalTableOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateGlobalTableInput struct { + + // The global table name. + // + // This member is required. + GlobalTableName *string + + // A list of Regions that should be added or removed from the global table. + // + // This member is required. + ReplicaUpdates []types.ReplicaUpdate + + noSmithyDocumentSerde +} + +func (in *UpdateGlobalTableInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.GlobalTableName + +} + +type UpdateGlobalTableOutput struct { + + // Contains the details of the global table. + GlobalTableDescription *types.GlobalTableDescription + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateGlobalTableMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpUpdateGlobalTable{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpUpdateGlobalTable{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateGlobalTable"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUpdateGlobalTableDiscoverEndpointMiddleware(stack, 
options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpUpdateGlobalTableValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateGlobalTable(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpUpdateGlobalTableDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpUpdateGlobalTableDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpUpdateGlobalTableDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*UpdateGlobalTableInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + 
identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opUpdateGlobalTable(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateGlobalTable", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateGlobalTableSettings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateGlobalTableSettings.go new file mode 100644 index 0000000000..6505381e88 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateGlobalTableSettings.go @@ -0,0 +1,295 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Updates settings for a global table. +// +// This documentation is for version 2017.11.29 (Legacy) of global tables, which +// should be avoided for new global tables. Customers should use [Global Tables version 2019.11.21 (Current)]when possible, +// because it provides greater flexibility, higher efficiency, and consumes less +// write capacity than 2017.11.29 (Legacy). +// +// To determine which version you're using, see [Determining the global table version you are using]. To update existing global tables +// from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see [Upgrading global tables]. +// +// [Global Tables version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html +// [Upgrading global tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html +// [Determining the global table version you are using]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html +func (c *Client) UpdateGlobalTableSettings(ctx context.Context, params *UpdateGlobalTableSettingsInput, optFns ...func(*Options)) (*UpdateGlobalTableSettingsOutput, error) { + if params == nil { + params = &UpdateGlobalTableSettingsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateGlobalTableSettings", params, optFns, c.addOperationUpdateGlobalTableSettingsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateGlobalTableSettingsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateGlobalTableSettingsInput struct { + + // The name of the global table + // + // This member is required. + GlobalTableName *string + + // The billing mode of the global table. If GlobalTableBillingMode is not + // specified, the global table defaults to PROVISIONED capacity billing mode. + // + // - PROVISIONED - We recommend using PROVISIONED for predictable workloads. + // PROVISIONED sets the billing mode to [Provisioned capacity mode]. 
+ // + // - PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable + // workloads. PAY_PER_REQUEST sets the billing mode to [On-demand capacity mode]. + // + // [Provisioned capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html + // [On-demand capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/on-demand-capacity-mode.html + GlobalTableBillingMode types.BillingMode + + // Represents the settings of a global secondary index for a global table that + // will be modified. + GlobalTableGlobalSecondaryIndexSettingsUpdate []types.GlobalTableGlobalSecondaryIndexSettingsUpdate + + // Auto scaling settings for managing provisioned write capacity for the global + // table. + GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate *types.AutoScalingSettingsUpdate + + // The maximum number of writes consumed per second before DynamoDB returns a + // ThrottlingException. + GlobalTableProvisionedWriteCapacityUnits *int64 + + // Represents the settings for a global table in a Region that will be modified. + ReplicaSettingsUpdate []types.ReplicaSettingsUpdate + + noSmithyDocumentSerde +} + +func (in *UpdateGlobalTableSettingsInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.GlobalTableName + +} + +type UpdateGlobalTableSettingsOutput struct { + + // The name of the global table. + GlobalTableName *string + + // The Region-specific settings for the global table. + ReplicaSettings []types.ReplicaSettingsDescription + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateGlobalTableSettingsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpUpdateGlobalTableSettings{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpUpdateGlobalTableSettings{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateGlobalTableSettings"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUpdateGlobalTableSettingsDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); 
err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpUpdateGlobalTableSettingsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateGlobalTableSettings(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpUpdateGlobalTableSettingsDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpUpdateGlobalTableSettingsDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpUpdateGlobalTableSettingsDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*UpdateGlobalTableSettingsInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", 
identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opUpdateGlobalTableSettings(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateGlobalTableSettings", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateItem.go new file mode 100644 index 0000000000..8416ad88e7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateItem.go @@ -0,0 +1,536 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Edits an existing item's attributes, or adds a new item to the table if it does +// not already exist. You can put, delete, or add attribute values. You can also +// perform a conditional update on an existing item (insert a new attribute +// name-value pair if it doesn't exist, or replace an existing name-value pair if +// it has certain expected attribute values). +// +// You can also return the item's attribute values in the same UpdateItem +// operation using the ReturnValues parameter. +func (c *Client) UpdateItem(ctx context.Context, params *UpdateItemInput, optFns ...func(*Options)) (*UpdateItemOutput, error) { + if params == nil { + params = &UpdateItemInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateItem", params, optFns, c.addOperationUpdateItemMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateItemOutput) + out.ResultMetadata = metadata + return out, nil +} + +// Represents the input of an UpdateItem operation. +type UpdateItemInput struct { + + // The primary key of the item to be updated. Each element consists of an + // attribute name and a value for that attribute. + // + // For the primary key, you must provide all of the attributes. For example, with + // a simple primary key, you only need to provide a value for the partition key. + // For a composite primary key, you must provide values for both the partition key + // and the sort key. + // + // This member is required. + Key map[string]types.AttributeValue + + // The name of the table containing the item to update. You can also provide the + // Amazon Resource Name (ARN) of the table in this parameter. + // + // This member is required. + TableName *string + + // This is a legacy parameter. Use UpdateExpression instead. For more information, + // see [AttributeUpdates]in the Amazon DynamoDB Developer Guide. 
+ // + // [AttributeUpdates]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributeUpdates.html + AttributeUpdates map[string]types.AttributeValueUpdate + + // A condition that must be satisfied in order for a conditional update to succeed. + // + // An expression can contain any of the following: + // + // - Functions: attribute_exists | attribute_not_exists | attribute_type | + // contains | begins_with | size + // + // These function names are case-sensitive. + // + // - Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN + // + // - Logical operators: AND | OR | NOT + // + // For more information about condition expressions, see [Specifying Conditions] in the Amazon DynamoDB + // Developer Guide. + // + // [Specifying Conditions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html + ConditionExpression *string + + // This is a legacy parameter. Use ConditionExpression instead. For more + // information, see [ConditionalOperator]in the Amazon DynamoDB Developer Guide. + // + // [ConditionalOperator]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html + ConditionalOperator types.ConditionalOperator + + // This is a legacy parameter. Use ConditionExpression instead. For more + // information, see [Expected]in the Amazon DynamoDB Developer Guide. + // + // [Expected]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html + Expected map[string]types.ExpectedAttributeValue + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames : + // + // - To access an attribute whose name conflicts with a DynamoDB reserved word. + // + // - To create a placeholder for repeating occurrences of an attribute name in + // an expression. + // + // - To prevent special characters in an attribute name from being + // misinterpreted in an expression. + // + // Use the # character in an expression to dereference an attribute name. For + // example, consider the following attribute name: + // + // - Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be used + // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in the + // Amazon DynamoDB Developer Guide.) To work around this, you could specify the + // following for ExpressionAttributeNames : + // + // - {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // - #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. + // + // For more information about expression attribute names, see [Specifying Item Attributes] in the Amazon + // DynamoDB Developer Guide. + // + // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html + // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html + ExpressionAttributeNames map[string]string + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute value. 
+ // For example, suppose that you wanted to check whether the value of the + // ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, + // ":disc":{"S":"Discontinued"} } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see [Condition Expressions] in the Amazon + // DynamoDB Developer Guide. + // + // [Condition Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html + ExpressionAttributeValues map[string]types.AttributeValue + + // Determines the level of detail about either provisioned or on-demand throughput + // consumption that is returned in the response: + // + // - INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary index + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // + // - TOTAL - The response includes only the aggregate ConsumedCapacity for the + // operation. + // + // - NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity types.ReturnConsumedCapacity + + // Determines whether item collection metrics are returned. If set to SIZE , the + // response includes statistics about item collections, if any, that were modified + // during the operation. If set to NONE (the default), no statistics are returned. + ReturnItemCollectionMetrics types.ReturnItemCollectionMetrics + + // Use ReturnValues if you want to get the item attributes as they appear before + // or after they are successfully updated. For UpdateItem , the valid values are: + // + // - NONE - If ReturnValues is not specified, or if its value is NONE , then + // nothing is returned. (This setting is the default for ReturnValues .) + // + // - ALL_OLD - Returns all of the attributes of the item, as they appeared before + // the UpdateItem operation. + // + // - UPDATED_OLD - Returns only the updated attributes, as they appeared before + // the UpdateItem operation. + // + // - ALL_NEW - Returns all of the attributes of the item, as they appear after + // the UpdateItem operation. + // + // - UPDATED_NEW - Returns only the updated attributes, as they appear after the + // UpdateItem operation. + // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. + // + // The values returned are strongly consistent. + ReturnValues types.ReturnValue + + // An optional parameter that returns the item attributes for an UpdateItem + // operation that failed a condition check. + // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed.
+ ReturnValuesOnConditionCheckFailure types.ReturnValuesOnConditionCheckFailure + + // An expression that defines one or more attributes to be updated, the action to + // be performed on them, and new values for them. + // + // The following action values are available for UpdateExpression . + // + // - SET - Adds one or more attributes and values to an item. If any of these + // attributes already exist, they are replaced by the new values. You can also use + // SET to add or subtract from an attribute that is of type Number. For example: + // SET myNum = myNum + :val + // + // SET supports the following functions: + // + // - if_not_exists (path, operand) - if the item does not contain an attribute at + // the specified path, then if_not_exists evaluates to operand; otherwise, it + // evaluates to path. You can use this function to avoid overwriting an attribute + // that may already be present in the item. + // + // - list_append (operand, operand) - evaluates to a list with a new element + // added to it. You can append the new element to the start or the end of the list + // by reversing the order of the operands. + // + // These function names are case-sensitive. + // + // - REMOVE - Removes one or more attributes from an item. + // + // - ADD - Adds the specified value to the item, if the attribute does not + // already exist. If the attribute does exist, then the behavior of ADD depends + // on the data type of the attribute: + // + // - If the existing attribute is a number, and if Value is also a number, then + // Value is mathematically added to the existing attribute. If Value is a + // negative number, then it is subtracted from the existing attribute. + // + // If you use ADD to increment or decrement a number value for an item that doesn't + // exist before the update, DynamoDB uses 0 as the initial value. + // + // Similarly, if you use ADD for an existing item to increment or decrement an + // attribute value that doesn't exist before the update, DynamoDB uses 0 as the + // initial value. For example, suppose that the item you want to update doesn't + // have an attribute named itemcount , but you decide to ADD the number 3 to this + // attribute anyway. DynamoDB will create the itemcount attribute, set its + // initial value to 0 , and finally add 3 to it. The result will be a new + // itemcount attribute in the item, with a value of 3 . + // + // - If the existing data type is a set and if Value is also a set, then Value is + // added to the existing set. For example, if the attribute value is the set + // [1,2] , and the ADD action specified [3] , then the final attribute value is + // [1,2,3] . An error occurs if an ADD action is specified for a set attribute + // and the attribute type specified does not match the existing set type. + // + // Both sets must have the same primitive data type. For example, if the existing + // data type is a set of strings, the Value must also be a set of strings. + // + // The ADD action only supports Number and set data types. In addition, ADD can + // only be used on top-level attributes, not nested attributes. + // + // - DELETE - Deletes an element from a set. + // + // If a set of values is specified, then those values are subtracted from the old + // set. For example, if the attribute value was the set [a,b,c] and the DELETE + // action specifies [a,c] , then the final attribute value is [b] . Specifying an + // empty set is an error. + // + // The DELETE action only supports set data types. 
In addition, DELETE can only be + // used on top-level attributes, not nested attributes. + // + // You can have many actions in a single expression, such as the following: SET + // a=:value1, b=:value2 DELETE :value3, :value4, :value5 + // + // For more information on update expressions, see [Modifying Items and Attributes] in the Amazon DynamoDB + // Developer Guide. + // + // [Modifying Items and Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.Modifying.html + UpdateExpression *string + + noSmithyDocumentSerde +} + +func (in *UpdateItemInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.TableName + +} + +// Represents the output of an UpdateItem operation. +type UpdateItemOutput struct { + + // A map of attribute values as they appear before or after the UpdateItem + // operation, as determined by the ReturnValues parameter. + // + // The Attributes map is only present if the update was successful and ReturnValues + // was specified as something other than NONE in the request. Each element + // represents one attribute. + Attributes map[string]types.AttributeValue + + // The capacity units consumed by the UpdateItem operation. The data returned + // includes the total provisioned throughput consumed, along with statistics for + // the table and any indexes involved in the operation. ConsumedCapacity is only + // returned if the ReturnConsumedCapacity parameter was specified. For more + // information, see [Capacity unit consumption for write operations]in the Amazon DynamoDB Developer Guide. + // + // [Capacity unit consumption for write operations]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#write-operation-consumption + ConsumedCapacity *types.ConsumedCapacity + + // Information about item collections, if any, that were affected by the UpdateItem + // operation. ItemCollectionMetrics is only returned if the + // ReturnItemCollectionMetrics parameter was specified. If the table does not have + // any local secondary indexes, this information is not returned in the response. + // + // Each ItemCollectionMetrics element consists of: + // + // - ItemCollectionKey - The partition key value of the item collection. This is + // the same as the partition key value of the item itself. + // + // - SizeEstimateRangeGB - An estimate of item collection size, in gigabytes. + // This value is a two-element array containing a lower bound and an upper bound + // for the estimate. The estimate includes the size of all the items in the table, + // plus the size of all attributes projected into all of the local secondary + // indexes on that table. Use this estimate to measure whether a local secondary + // index is approaching its size limit. + // + // The estimate is subject to change over time; therefore, do not rely on the + // precision or accuracy of the estimate. + ItemCollectionMetrics *types.ItemCollectionMetrics + + // Metadata pertaining to the operation's result.
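The expression parameters documented above are easiest to read from the caller's side. Below is a minimal, hypothetical usage sketch; the Products table, Id key, and attribute names are illustrative only and are not part of this SDK or this change:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	// SET a reserved-word attribute through a #-name and ADD to a numeric
	// counter, guarded by a condition expression; all names are hypothetical.
	out, err := client.UpdateItem(context.TODO(), &dynamodb.UpdateItemInput{
		TableName: aws.String("Products"),
		Key: map[string]types.AttributeValue{
			"Id": &types.AttributeValueMemberS{Value: "123"},
		},
		UpdateExpression:         aws.String("SET #S = :avail ADD ViewCount :one"),
		ConditionExpression:      aws.String("attribute_exists(Id)"),
		ExpressionAttributeNames: map[string]string{"#S": "ProductStatus"},
		ExpressionAttributeValues: map[string]types.AttributeValue{
			":avail": &types.AttributeValueMemberS{Value: "Available"},
			":one":   &types.AttributeValueMemberN{Value: "1"},
		},
		ReturnValues: types.ReturnValueUpdatedNew,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Attributes) // only the attributes the update touched
}

ReturnValueUpdatedNew keeps the response small while still confirming the new values, and per the documentation above consumes no read capacity.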
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateItemMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpUpdateItem{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpUpdateItem{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateItem"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUpdateItemDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpUpdateItemValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateItem(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = 
addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpUpdateItemDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpUpdateItemDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpUpdateItemDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*UpdateItemInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opUpdateItem(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateItem", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateKinesisStreamingDestination.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateKinesisStreamingDestination.go new file mode 100644 index 0000000000..9a1b68ebd0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateKinesisStreamingDestination.go @@ -0,0 +1,270 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// The command to update the Kinesis stream destination. 
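The fetchOp*DiscoverEndpoint helpers generated above implement the client side of DynamoDB endpoint discovery: a region-keyed endpoint cache consulted on every call and refreshed in the background through DescribeEndpoints. The DiscoverEndpoint middleware only activates when discovery is enabled on the client options; a sketch of opting in, assuming the imports from the UpdateItem sketch earlier plus "github.com/aws/aws-sdk-go-v2/config":

func newDiscoveryClient(ctx context.Context) (*dynamodb.Client, error) {
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		return nil, err
	}
	// With discovery enabled, the DiscoverEndpoint middleware serves cached
	// endpoints and refreshes them asynchronously, as in the generated
	// fetchOp*DiscoverEndpoint functions above.
	return dynamodb.NewFromConfig(cfg, func(o *dynamodb.Options) {
		o.EndpointDiscovery.EnableEndpointDiscovery = aws.EndpointDiscoveryEnabled
	}), nil
}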
+func (c *Client) UpdateKinesisStreamingDestination(ctx context.Context, params *UpdateKinesisStreamingDestinationInput, optFns ...func(*Options)) (*UpdateKinesisStreamingDestinationOutput, error) { + if params == nil { + params = &UpdateKinesisStreamingDestinationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateKinesisStreamingDestination", params, optFns, c.addOperationUpdateKinesisStreamingDestinationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateKinesisStreamingDestinationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateKinesisStreamingDestinationInput struct { + + // The Amazon Resource Name (ARN) for the Kinesis stream input. + // + // This member is required. + StreamArn *string + + // The table name for the Kinesis streaming destination input. You can also + // provide the ARN of the table in this parameter. + // + // This member is required. + TableName *string + + // The command to update the Kinesis stream configuration. + UpdateKinesisStreamingConfiguration *types.UpdateKinesisStreamingConfiguration + + noSmithyDocumentSerde +} + +func (in *UpdateKinesisStreamingDestinationInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.TableName + +} + +type UpdateKinesisStreamingDestinationOutput struct { + + // The status of the attempt to update the Kinesis streaming destination output. + DestinationStatus types.DestinationStatus + + // The ARN for the Kinesis stream input. + StreamArn *string + + // The table name for the Kinesis streaming destination output. + TableName *string + + // The command to update the Kinesis streaming destination configuration. + UpdateKinesisStreamingConfiguration *types.UpdateKinesisStreamingConfiguration + + // Metadata pertaining to the operation's result. 
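A hypothetical call to this operation, reusing the imports from the UpdateItem sketch earlier; the table name and stream ARN are placeholders:

func setStreamPrecision(ctx context.Context, client *dynamodb.Client) error {
	_, err := client.UpdateKinesisStreamingDestination(ctx, &dynamodb.UpdateKinesisStreamingDestinationInput{
		TableName: aws.String("Products"),                                             // hypothetical
		StreamArn: aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/orders"), // placeholder
		UpdateKinesisStreamingConfiguration: &types.UpdateKinesisStreamingConfiguration{
			ApproximateCreationDateTimePrecision: types.ApproximateCreationDateTimePrecisionMicrosecond,
		},
	})
	return err
}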
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateKinesisStreamingDestinationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpUpdateKinesisStreamingDestination{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpUpdateKinesisStreamingDestination{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateKinesisStreamingDestination"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUpdateKinesisStreamingDestinationDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpUpdateKinesisStreamingDestinationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateKinesisStreamingDestination(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = 
addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpUpdateKinesisStreamingDestinationDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpUpdateKinesisStreamingDestinationDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpUpdateKinesisStreamingDestinationDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*UpdateKinesisStreamingDestinationInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opUpdateKinesisStreamingDestination(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateKinesisStreamingDestination", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTable.go new file mode 100644 index 0000000000..325db35c8d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTable.go @@ -0,0 +1,381 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Modifies the provisioned throughput settings, global secondary indexes, or +// DynamoDB Streams settings for a given table. +// +// You can only perform one of the following operations at once: +// +// - Modify the provisioned throughput settings of the table. +// +// - Remove a global secondary index from the table. +// +// - Create a new global secondary index on the table. After the index begins +// backfilling, you can use UpdateTable to perform other operations. +// +// UpdateTable is an asynchronous operation; while it's executing, the table +// status changes from ACTIVE to UPDATING . While it's UPDATING , you can't issue +// another UpdateTable request. When the table returns to the ACTIVE state, the +// UpdateTable operation is complete. +func (c *Client) UpdateTable(ctx context.Context, params *UpdateTableInput, optFns ...func(*Options)) (*UpdateTableOutput, error) { + if params == nil { + params = &UpdateTableInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateTable", params, optFns, c.addOperationUpdateTableMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateTableOutput) + out.ResultMetadata = metadata + return out, nil +} + +// Represents the input of an UpdateTable operation. +type UpdateTableInput struct { + + // The name of the table to be updated. You can also provide the Amazon Resource + // Name (ARN) of the table in this parameter. + // + // This member is required. + TableName *string + + // An array of attributes that describe the key schema for the table and indexes. + // If you are adding a new global secondary index to the table, + // AttributeDefinitions must include the key element(s) of the new index. + AttributeDefinitions []types.AttributeDefinition + + // Controls how you are charged for read and write throughput and how you manage + // capacity. When switching from pay-per-request to provisioned capacity, initial + // provisioned capacity values must be set. The initial provisioned capacity values + // are estimated based on the consumed read and write capacity of your table and + // global secondary indexes over the past 30 minutes. + // + // - PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for most DynamoDB + // workloads. PAY_PER_REQUEST sets the billing mode to [On-demand capacity mode]. + // + // - PROVISIONED - We recommend using PROVISIONED for steady workloads with + // predictable growth where capacity requirements can be reliably forecasted. + // PROVISIONED sets the billing mode to [Provisioned capacity mode]. + // + // [Provisioned capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html + // [On-demand capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/on-demand-capacity-mode.html + BillingMode types.BillingMode + + // Indicates whether deletion protection is to be enabled (true) or disabled + // (false) on the table. + DeletionProtectionEnabled *bool + + // An array of one or more global secondary indexes for the table. For each index + // in the array, you can request one action: + // + // - Create - add a new global secondary index to the table. 
+ // + // - Update - modify the provisioned throughput settings of an existing global + // secondary index. + // + // - Delete - remove a global secondary index from the table. + // + // You can create or delete only one global secondary index per UpdateTable + // operation. + // + // For more information, see [Managing Global Secondary Indexes] in the Amazon DynamoDB Developer Guide. + // + // [Managing Global Secondary Indexes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GSI.OnlineOps.html + GlobalSecondaryIndexUpdates []types.GlobalSecondaryIndexUpdate + + // A list of witness updates for an MRSC global table. A witness provides a + // cost-effective alternative to a full replica in an MRSC global table by + // maintaining replicated change data written to global table replicas. You cannot + // perform read or write operations on a witness. For each witness, you can request + // one action: + // + // - Create - add a new witness to the global table. + // + // - Delete - remove a witness from the global table. + // + // You can create or delete only one witness per UpdateTable operation. + // + // For more information, see [Multi-Region strong consistency (MRSC)] in the Amazon DynamoDB Developer Guide. + // + // [Multi-Region strong consistency (MRSC)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_HowItWorks.html#V2globaltables_HowItWorks.consistency-modes + GlobalTableWitnessUpdates []types.GlobalTableWitnessGroupUpdate + + // Specifies the consistency mode for a new global table. This parameter is only + // valid when you create a global table by specifying one or more [Create]actions in the [ReplicaUpdates] + // action list. + // + // You can specify one of the following consistency modes: + // + // - EVENTUAL : Configures a new global table for multi-Region eventual + // consistency (MREC). This is the default consistency mode for global tables. + // + // - STRONG : Configures a new global table for multi-Region strong consistency + // (MRSC). + // + // If you don't specify this field, the global table consistency mode defaults to + // EVENTUAL . For more information about global tables consistency modes, see [Consistency modes] in the + // Amazon DynamoDB Developer Guide. + // + // [ReplicaUpdates]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateTable.html#DDB-UpdateTable-request-ReplicaUpdates + // [Create]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_ReplicationGroupUpdate.html#DDB-Type-ReplicationGroupUpdate-Create + // [Consistency modes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_HowItWorks.html#V2globaltables_HowItWorks.consistency-modes + MultiRegionConsistency types.MultiRegionConsistency + + // Updates the maximum number of read and write units for the specified table in + // on-demand capacity mode. If you use this parameter, you must specify + // MaxReadRequestUnits , MaxWriteRequestUnits , or both. + OnDemandThroughput *types.OnDemandThroughput + + // The new provisioned throughput settings for the specified table or index. + ProvisionedThroughput *types.ProvisionedThroughput + + // A list of replica update actions (create, delete, or update) for the table. + ReplicaUpdates []types.ReplicationGroupUpdate + + // The new server-side encryption settings for the specified table. + SSESpecification *types.SSESpecification + + // Represents the DynamoDB Streams configuration for the table.
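Given the constraints above (initial provisioned capacity is required when leaving pay-per-request, and only one global secondary index change is allowed per call), here is a sketch of switching a hypothetical table to provisioned capacity, with imports as in the UpdateItem sketch earlier:

func switchToProvisioned(ctx context.Context, client *dynamodb.Client) error {
	_, err := client.UpdateTable(ctx, &dynamodb.UpdateTableInput{
		TableName:   aws.String("Products"), // hypothetical
		BillingMode: types.BillingModeProvisioned,
		// Initial capacity values are required when moving off pay-per-request.
		ProvisionedThroughput: &types.ProvisionedThroughput{
			ReadCapacityUnits:  aws.Int64(10),
			WriteCapacityUnits: aws.Int64(5),
		},
	})
	// The table transitions ACTIVE -> UPDATING; wait for ACTIVE again (for
	// example via DescribeTable) before issuing another UpdateTable.
	return err
}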
+ // + // You receive a ValidationException if you try to enable a stream on a table that + // already has a stream, or if you try to disable a stream on a table that doesn't + // have a stream. + StreamSpecification *types.StreamSpecification + + // The table class of the table to be updated. Valid values are STANDARD and + // STANDARD_INFREQUENT_ACCESS . + TableClass types.TableClass + + // Represents the warm throughput (in read units per second and write units per + // second) for updating a table. + WarmThroughput *types.WarmThroughput + + noSmithyDocumentSerde +} + +func (in *UpdateTableInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.TableName + +} + +// Represents the output of an UpdateTable operation. +type UpdateTableOutput struct { + + // Represents the properties of the table. + TableDescription *types.TableDescription + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateTableMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpUpdateTable{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpUpdateTable{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateTable"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUpdateTableDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpUpdateTableValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateTable(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = 
addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpUpdateTableDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpUpdateTableDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpUpdateTableDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*UpdateTableInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opUpdateTable(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateTable", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTableReplicaAutoScaling.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTableReplicaAutoScaling.go new file mode 100644 index 0000000000..26b33641df --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTableReplicaAutoScaling.go @@ -0,0 +1,218 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Updates auto scaling settings on your global tables at once. +func (c *Client) UpdateTableReplicaAutoScaling(ctx context.Context, params *UpdateTableReplicaAutoScalingInput, optFns ...func(*Options)) (*UpdateTableReplicaAutoScalingOutput, error) { + if params == nil { + params = &UpdateTableReplicaAutoScalingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateTableReplicaAutoScaling", params, optFns, c.addOperationUpdateTableReplicaAutoScalingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateTableReplicaAutoScalingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateTableReplicaAutoScalingInput struct { + + // The name of the global table to be updated. You can also provide the Amazon + // Resource Name (ARN) of the table in this parameter. + // + // This member is required. + TableName *string + + // Represents the auto scaling settings of the global secondary indexes of the + // replica to be updated. + GlobalSecondaryIndexUpdates []types.GlobalSecondaryIndexAutoScalingUpdate + + // Represents the auto scaling settings to be modified for a global table or + // global secondary index. + ProvisionedWriteCapacityAutoScalingUpdate *types.AutoScalingSettingsUpdate + + // Represents the auto scaling settings of replicas of the table that will be + // modified. + ReplicaUpdates []types.ReplicaAutoScalingUpdate + + noSmithyDocumentSerde +} + +func (in *UpdateTableReplicaAutoScalingInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.TableName + +} + +type UpdateTableReplicaAutoScalingOutput struct { + + // Returns information about the auto scaling settings of a table with replicas. + TableAutoScalingDescription *types.TableAutoScalingDescription + + // Metadata pertaining to the operation's result. 
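A sketch of tuning write-capacity auto scaling through this operation; the limits and target value are illustrative, with imports as in the UpdateItem sketch earlier:

func tuneWriteAutoScaling(ctx context.Context, client *dynamodb.Client) error {
	_, err := client.UpdateTableReplicaAutoScaling(ctx, &dynamodb.UpdateTableReplicaAutoScalingInput{
		TableName: aws.String("Products"), // hypothetical
		ProvisionedWriteCapacityAutoScalingUpdate: &types.AutoScalingSettingsUpdate{
			MinimumUnits: aws.Int64(5),
			MaximumUnits: aws.Int64(100),
			ScalingPolicyUpdate: &types.AutoScalingPolicyUpdate{
				TargetTrackingScalingPolicyConfiguration: &types.AutoScalingTargetTrackingScalingPolicyConfigurationUpdate{
					TargetValue: aws.Float64(70.0), // aim for roughly 70% utilization
				},
			},
		},
	})
	return err
}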
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateTableReplicaAutoScalingMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpUpdateTableReplicaAutoScaling{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpUpdateTableReplicaAutoScaling{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateTableReplicaAutoScaling"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpUpdateTableReplicaAutoScalingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateTableReplicaAutoScaling(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = 
addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateTableReplicaAutoScaling(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateTableReplicaAutoScaling", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTimeToLive.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTimeToLive.go new file mode 100644 index 0000000000..6c0fc24a13 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTimeToLive.go @@ -0,0 +1,288 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// The UpdateTimeToLive method enables or disables Time to Live (TTL) for the +// specified table. A successful UpdateTimeToLive call returns the current +// TimeToLiveSpecification . It can take up to one hour for the change to fully +// process. Any additional UpdateTimeToLive calls for the same table during this +// one hour duration result in a ValidationException . +// +// TTL compares the current time in epoch time format to the time stored in the +// TTL attribute of an item. If the epoch time value stored in the attribute is +// less than the current time, the item is marked as expired and subsequently +// deleted. +// +// The epoch time format is the number of seconds elapsed since 12:00:00 AM +// January 1, 1970 UTC. +// +// DynamoDB deletes expired items on a best-effort basis to ensure availability of +// throughput for other data operations. +// +// DynamoDB typically deletes expired items within two days of expiration. The +// exact duration within which an item gets deleted after expiration is specific to +// the nature of the workload. Items that have expired and not been deleted will +// still show up in reads, queries, and scans. +// +// As items are deleted, they are removed from any local secondary index and +// global secondary index immediately in the same eventually consistent way as a +// standard delete operation. +// +// For more information, see [Time To Live] in the Amazon DynamoDB Developer Guide. 
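A sketch of enabling TTL on an attribute that stores epoch seconds; the table and attribute names are illustrative, with imports as in the UpdateItem sketch earlier:

func enableTTL(ctx context.Context, client *dynamodb.Client) error {
	_, err := client.UpdateTimeToLive(ctx, &dynamodb.UpdateTimeToLiveInput{
		TableName: aws.String("Products"), // hypothetical
		TimeToLiveSpecification: &types.TimeToLiveSpecification{
			AttributeName: aws.String("ExpiresAt"), // illustrative epoch-seconds attribute
			Enabled:       aws.Bool(true),
		},
	})
	return err
}

Because expired items are deleted best-effort, readers should still filter on the TTL attribute (for example, a FilterExpression such as "ExpiresAt > :now") until deletion completes.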
+// +// [Time To Live]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/TTL.html +func (c *Client) UpdateTimeToLive(ctx context.Context, params *UpdateTimeToLiveInput, optFns ...func(*Options)) (*UpdateTimeToLiveOutput, error) { + if params == nil { + params = &UpdateTimeToLiveInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateTimeToLive", params, optFns, c.addOperationUpdateTimeToLiveMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateTimeToLiveOutput) + out.ResultMetadata = metadata + return out, nil +} + +// Represents the input of an UpdateTimeToLive operation. +type UpdateTimeToLiveInput struct { + + // The name of the table to be configured. You can also provide the Amazon + // Resource Name (ARN) of the table in this parameter. + // + // This member is required. + TableName *string + + // Represents the settings used to enable or disable Time to Live for the + // specified table. + // + // This member is required. + TimeToLiveSpecification *types.TimeToLiveSpecification + + noSmithyDocumentSerde +} + +func (in *UpdateTimeToLiveInput) bindEndpointParams(p *EndpointParameters) { + + p.ResourceArn = in.TableName + +} + +type UpdateTimeToLiveOutput struct { + + // Represents the output of an UpdateTimeToLive operation. + TimeToLiveSpecification *types.TimeToLiveSpecification + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateTimeToLiveMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpUpdateTimeToLive{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpUpdateTimeToLive{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateTimeToLive"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUpdateTimeToLiveDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addUserAgentAccountIDEndpointMode(stack, 
options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpUpdateTimeToLiveValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateTimeToLive(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func addOpUpdateTimeToLiveDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpUpdateTimeToLiveDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpUpdateTimeToLiveDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*UpdateTimeToLiveInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go 
c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opUpdateTimeToLive(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateTimeToLive", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/auth.go new file mode 100644 index 0000000000..b1d605c82d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/auth.go @@ -0,0 +1,339 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/metrics" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "slices" + "strings" +) + +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { + params.Region = options.Region +} + +type setLegacyContextSigningOptionsMiddleware struct { +} + +func (*setLegacyContextSigningOptionsMiddleware) ID() string { + return "setLegacyContextSigningOptions" +} + +func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + rscheme := getResolvedAuthScheme(ctx) + schemeID := rscheme.Scheme.SchemeID() + + if sn := awsmiddleware.GetSigningName(ctx); sn != "" { + if schemeID == "aws.auth#sigv4" { + smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn) + } else if schemeID == "aws.auth#sigv4a" { + smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn) + } + } + + if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" { + if schemeID == "aws.auth#sigv4" { + smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr) + } else if schemeID == "aws.auth#sigv4a" { + smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr}) + } + } + + return next.HandleFinalize(ctx, in) +} + +func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error { + return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) +} + +type withAnonymous struct { + resolver AuthSchemeResolver +} + +var _ AuthSchemeResolver = (*withAnonymous)(nil) + +func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { + opts, err := v.resolver.ResolveAuthSchemes(ctx, params) + if err != nil { + return nil, err + } + + opts = append(opts, &smithyauth.Option{ + SchemeID: smithyauth.SchemeIDAnonymous, + }) + return opts, nil +} + +func wrapWithAnonymousAuth(options *Options) { + if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { + return + } + + options.AuthSchemeResolver = &withAnonymous{ + resolver: options.AuthSchemeResolver, + } +} + +// AuthResolverParameters contains the set of inputs necessary for auth scheme +// resolution. +type AuthResolverParameters struct { + // The name of the operation being invoked. + Operation string + + // The region in which the operation is being invoked. 
+ Region string +} + +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { + params := &AuthResolverParameters{ + Operation: operation, + } + + bindAuthParamsRegion(ctx, params, input, options) + + return params +} + +// AuthSchemeResolver returns a set of possible authentication options for an +// operation. +type AuthSchemeResolver interface { + ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error) +} + +type defaultAuthSchemeResolver struct{} + +var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil) + +func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { + if overrides, ok := operationAuthOptions[params.Operation]; ok { + return overrides(params), nil + } + return serviceAuthOptions(params), nil +} + +var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{} + +func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + { + SchemeID: smithyauth.SchemeIDSigV4, + SignerProperties: func() smithy.Properties { + var props smithy.Properties + smithyhttp.SetSigV4SigningName(&props, "dynamodb") + smithyhttp.SetSigV4SigningRegion(&props, params.Region) + return props + }(), + }, + } +} + +type resolveAuthSchemeMiddleware struct { + operation string + options Options +} + +func (*resolveAuthSchemeMiddleware) ID() string { + return "ResolveAuthScheme" +} + +func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "ResolveAuthScheme") + defer span.End() + + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) + options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) + } + + scheme, ok := m.selectScheme(options) + if !ok { + return out, metadata, fmt.Errorf("could not select an auth scheme") + } + + ctx = setResolvedAuthScheme(ctx, scheme) + + span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID()) + span.End() + return next.HandleFinalize(ctx, in) +} + +func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { + sorted := sortAuthOptions(options, m.options.AuthSchemePreference) + for _, option := range sorted { + if option.SchemeID == smithyauth.SchemeIDAnonymous { + return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true + } + + for _, scheme := range m.options.AuthSchemes { + if scheme.SchemeID() != option.SchemeID { + continue + } + + if scheme.IdentityResolver(m.options) != nil { + return newResolvedAuthScheme(scheme, option), true + } + } + } + + return nil, false +} + +func sortAuthOptions(options []*smithyauth.Option, preferred []string) []*smithyauth.Option { + byPriority := make([]*smithyauth.Option, 0, len(options)) + for _, prefName := range preferred { + for _, option := range options { + optName := option.SchemeID + if parts := strings.Split(option.SchemeID, "#"); len(parts) == 2 { + optName = parts[1] + } + if prefName == optName { + byPriority = append(byPriority, option) + } + } + } + for _, option := range options { + if !slices.ContainsFunc(byPriority, func(o *smithyauth.Option) bool { + 
return o.SchemeID == option.SchemeID + }) { + byPriority = append(byPriority, option) + } + } + return byPriority +} + +type resolvedAuthSchemeKey struct{} + +type resolvedAuthScheme struct { + Scheme smithyhttp.AuthScheme + IdentityProperties smithy.Properties + SignerProperties smithy.Properties +} + +func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme { + return &resolvedAuthScheme{ + Scheme: scheme, + IdentityProperties: option.IdentityProperties, + SignerProperties: option.SignerProperties, + } +} + +func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context { + return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme) +} + +func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme { + v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme) + return v +} + +type getIdentityMiddleware struct { + options Options +} + +func (*getIdentityMiddleware) ID() string { + return "GetIdentity" +} + +func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + innerCtx, span := tracing.StartSpan(ctx, "GetIdentity") + defer span.End() + + rscheme := getResolvedAuthScheme(innerCtx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + resolver := rscheme.Scheme.IdentityResolver(m.options) + if resolver == nil { + return out, metadata, fmt.Errorf("no identity resolver") + } + + identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration", + func() (smithyauth.Identity, error) { + return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties) + }, + func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) + if err != nil { + return out, metadata, fmt.Errorf("get identity: %w", err) + } + + ctx = setIdentity(ctx, identity) + + span.End() + return next.HandleFinalize(ctx, in) +} + +type identityKey struct{} + +func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context { + return middleware.WithStackValue(ctx, identityKey{}, identity) +} + +func getIdentity(ctx context.Context) smithyauth.Identity { + v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity) + return v +} + +type signRequestMiddleware struct { + options Options +} + +func (*signRequestMiddleware) ID() string { + return "Signing" +} + +func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "SignRequest") + defer span.End() + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) + } + + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + identity := getIdentity(ctx) + if identity == nil { + return out, metadata, fmt.Errorf("no identity") + } + + signer := rscheme.Scheme.Signer() + if signer == nil { + return out, metadata, fmt.Errorf("no signer") + } + + _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) { + return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties) + }, func(o *metrics.RecordMetricOptions) 
{ + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) + if err != nil { + return out, metadata, fmt.Errorf("sign request: %w", err) + } + + span.End() + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/deserializers.go new file mode 100644 index 0000000000..fdf566c4c5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/deserializers.go @@ -0,0 +1,19498 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + smithy "github.com/aws/smithy-go" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "io/ioutil" + "math" + "strings" +) + +type awsAwsjson10_deserializeOpBatchExecuteStatement struct { +} + +func (*awsAwsjson10_deserializeOpBatchExecuteStatement) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpBatchExecuteStatement) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorBatchExecuteStatement(response, &metadata) + } + output := &BatchExecuteStatementOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentBatchExecuteStatementOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorBatchExecuteStatement(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := 
errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("RequestLimitExceeded", errorCode): + return awsAwsjson10_deserializeErrorRequestLimitExceeded(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpBatchGetItem struct { +} + +func (*awsAwsjson10_deserializeOpBatchGetItem) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpBatchGetItem) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorBatchGetItem(response, &metadata) + } + output := &BatchGetItemOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentBatchGetItemOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorBatchGetItem(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy 
error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("ProvisionedThroughputExceededException", errorCode): + return awsAwsjson10_deserializeErrorProvisionedThroughputExceededException(response, errorBody) + + case strings.EqualFold("RequestLimitExceeded", errorCode): + return awsAwsjson10_deserializeErrorRequestLimitExceeded(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpBatchWriteItem struct { +} + +func (*awsAwsjson10_deserializeOpBatchWriteItem) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpBatchWriteItem) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorBatchWriteItem(response, &metadata) + } + output := &BatchWriteItemOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = 
awsAwsjson10_deserializeOpDocumentBatchWriteItemOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorBatchWriteItem(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("ItemCollectionSizeLimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorItemCollectionSizeLimitExceededException(response, errorBody) + + case strings.EqualFold("ProvisionedThroughputExceededException", errorCode): + return awsAwsjson10_deserializeErrorProvisionedThroughputExceededException(response, errorBody) + + case strings.EqualFold("ReplicatedWriteConflictException", errorCode): + return awsAwsjson10_deserializeErrorReplicatedWriteConflictException(response, errorBody) + + case strings.EqualFold("RequestLimitExceeded", errorCode): + return awsAwsjson10_deserializeErrorRequestLimitExceeded(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpCreateBackup struct { +} + +func (*awsAwsjson10_deserializeOpCreateBackup) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpCreateBackup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, 
"client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorCreateBackup(response, &metadata) + } + output := &CreateBackupOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentCreateBackupOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorCreateBackup(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("BackupInUseException", errorCode): + return awsAwsjson10_deserializeErrorBackupInUseException(response, errorBody) + + case strings.EqualFold("ContinuousBackupsUnavailableException", errorCode): + return awsAwsjson10_deserializeErrorContinuousBackupsUnavailableException(response, errorBody) + + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("TableInUseException", errorCode): + return awsAwsjson10_deserializeErrorTableInUseException(response, errorBody) + + case strings.EqualFold("TableNotFoundException", errorCode): + return 
awsAwsjson10_deserializeErrorTableNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpCreateGlobalTable struct { +} + +func (*awsAwsjson10_deserializeOpCreateGlobalTable) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpCreateGlobalTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorCreateGlobalTable(response, &metadata) + } + output := &CreateGlobalTableOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentCreateGlobalTableOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorCreateGlobalTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("GlobalTableAlreadyExistsException", errorCode): + return awsAwsjson10_deserializeErrorGlobalTableAlreadyExistsException(response, errorBody) + + case 
strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("TableNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorTableNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpCreateTable struct { +} + +func (*awsAwsjson10_deserializeOpCreateTable) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpCreateTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorCreateTable(response, &metadata) + } + output := &CreateTableOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentCreateTableOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorCreateTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + 
Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("ResourceInUseException", errorCode): + return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpDeleteBackup struct { +} + +func (*awsAwsjson10_deserializeOpDeleteBackup) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpDeleteBackup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorDeleteBackup(response, &metadata) + } + output := &DeleteBackupOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentDeleteBackupOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorDeleteBackup(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, 
ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("BackupInUseException", errorCode): + return awsAwsjson10_deserializeErrorBackupInUseException(response, errorBody) + + case strings.EqualFold("BackupNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorBackupNotFoundException(response, errorBody) + + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpDeleteItem struct { +} + +func (*awsAwsjson10_deserializeOpDeleteItem) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpDeleteItem) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorDeleteItem(response, &metadata) + } + output := &DeleteItemOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentDeleteItemOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorDeleteItem(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := 
io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ConditionalCheckFailedException", errorCode): + return awsAwsjson10_deserializeErrorConditionalCheckFailedException(response, errorBody) + + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("ItemCollectionSizeLimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorItemCollectionSizeLimitExceededException(response, errorBody) + + case strings.EqualFold("ProvisionedThroughputExceededException", errorCode): + return awsAwsjson10_deserializeErrorProvisionedThroughputExceededException(response, errorBody) + + case strings.EqualFold("ReplicatedWriteConflictException", errorCode): + return awsAwsjson10_deserializeErrorReplicatedWriteConflictException(response, errorBody) + + case strings.EqualFold("RequestLimitExceeded", errorCode): + return awsAwsjson10_deserializeErrorRequestLimitExceeded(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("TransactionConflictException", errorCode): + return awsAwsjson10_deserializeErrorTransactionConflictException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpDeleteResourcePolicy struct { +} + +func (*awsAwsjson10_deserializeOpDeleteResourcePolicy) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpDeleteResourcePolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, 
&smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorDeleteResourcePolicy(response, &metadata) + } + output := &DeleteResourcePolicyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentDeleteResourcePolicyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorDeleteResourcePolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("PolicyNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorPolicyNotFoundException(response, errorBody) + + case strings.EqualFold("ResourceInUseException", errorCode): + return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpDeleteTable struct { +} + +func (*awsAwsjson10_deserializeOpDeleteTable) ID() string { + return 
"OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpDeleteTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorDeleteTable(response, &metadata) + } + output := &DeleteTableOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentDeleteTableOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorDeleteTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("ResourceInUseException", errorCode): + return 
awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpDescribeBackup struct { +} + +func (*awsAwsjson10_deserializeOpDescribeBackup) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpDescribeBackup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorDescribeBackup(response, &metadata) + } + output := &DescribeBackupOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentDescribeBackupOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorDescribeBackup(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case 
strings.EqualFold("BackupNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorBackupNotFoundException(response, errorBody) + + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpDescribeContinuousBackups struct { +} + +func (*awsAwsjson10_deserializeOpDescribeContinuousBackups) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpDescribeContinuousBackups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorDescribeContinuousBackups(response, &metadata) + } + output := &DescribeContinuousBackupsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentDescribeContinuousBackupsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorDescribeContinuousBackups(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + 
errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("TableNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorTableNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpDescribeContributorInsights struct { +} + +func (*awsAwsjson10_deserializeOpDescribeContributorInsights) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpDescribeContributorInsights) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorDescribeContributorInsights(response, &metadata) + } + output := &DescribeContributorInsightsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentDescribeContributorInsightsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorDescribeContributorInsights(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := 
getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpDescribeEndpoints struct { +} + +func (*awsAwsjson10_deserializeOpDescribeEndpoints) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpDescribeEndpoints) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorDescribeEndpoints(response, &metadata) + } + output := &DescribeEndpointsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentDescribeEndpointsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorDescribeEndpoints(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + 
decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpDescribeExport struct { +} + +func (*awsAwsjson10_deserializeOpDescribeExport) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpDescribeExport) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorDescribeExport(response, &metadata) + } + output := &DescribeExportOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentDescribeExportOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorDescribeExport(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), 
+ } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ExportNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorExportNotFoundException(response, errorBody) + + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpDescribeGlobalTable struct { +} + +func (*awsAwsjson10_deserializeOpDescribeGlobalTable) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpDescribeGlobalTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorDescribeGlobalTable(response, &metadata) + } + output := &DescribeGlobalTableOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentDescribeGlobalTableOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorDescribeGlobalTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != 
nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("GlobalTableNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorGlobalTableNotFoundException(response, errorBody) + + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpDescribeGlobalTableSettings struct { +} + +func (*awsAwsjson10_deserializeOpDescribeGlobalTableSettings) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpDescribeGlobalTableSettings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorDescribeGlobalTableSettings(response, &metadata) + } + output := &DescribeGlobalTableSettingsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentDescribeGlobalTableSettingsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorDescribeGlobalTableSettings(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := 
response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("GlobalTableNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorGlobalTableNotFoundException(response, errorBody) + + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpDescribeImport struct { +} + +func (*awsAwsjson10_deserializeOpDescribeImport) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpDescribeImport) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorDescribeImport(response, &metadata) + } + output := &DescribeImportOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentDescribeImportOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorDescribeImport(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed 
to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ImportNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorImportNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpDescribeKinesisStreamingDestination struct { +} + +func (*awsAwsjson10_deserializeOpDescribeKinesisStreamingDestination) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpDescribeKinesisStreamingDestination) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorDescribeKinesisStreamingDestination(response, &metadata) + } + output := &DescribeKinesisStreamingDestinationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentDescribeKinesisStreamingDestinationOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorDescribeKinesisStreamingDestination(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: 
fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpDescribeLimits struct { +} + +func (*awsAwsjson10_deserializeOpDescribeLimits) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpDescribeLimits) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorDescribeLimits(response, &metadata) + } + output := &DescribeLimitsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentDescribeLimitsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorDescribeLimits(response *smithyhttp.Response, metadata 
*middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpDescribeTable struct { +} + +func (*awsAwsjson10_deserializeOpDescribeTable) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpDescribeTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorDescribeTable(response, &metadata) + } + output := &DescribeTableOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentDescribeTableOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorDescribeTable(response *smithyhttp.Response, 
metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpDescribeTableReplicaAutoScaling struct { +} + +func (*awsAwsjson10_deserializeOpDescribeTableReplicaAutoScaling) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpDescribeTableReplicaAutoScaling) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorDescribeTableReplicaAutoScaling(response, &metadata) + } + output := &DescribeTableReplicaAutoScalingOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentDescribeTableReplicaAutoScalingOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorDescribeTableReplicaAutoScaling(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpDescribeTimeToLive struct { +} + +func (*awsAwsjson10_deserializeOpDescribeTimeToLive) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpDescribeTimeToLive) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorDescribeTimeToLive(response, &metadata) + } + output := &DescribeTimeToLiveOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentDescribeTimeToLiveOutput(&output, shape) + if err != nil { + var 
snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorDescribeTimeToLive(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpDisableKinesisStreamingDestination struct { +} + +func (*awsAwsjson10_deserializeOpDisableKinesisStreamingDestination) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpDisableKinesisStreamingDestination) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorDisableKinesisStreamingDestination(response, &metadata) + } + output := &DisableKinesisStreamingDestinationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) 
+ err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentDisableKinesisStreamingDestinationOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorDisableKinesisStreamingDestination(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("ResourceInUseException", errorCode): + return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpEnableKinesisStreamingDestination struct { +} + +func (*awsAwsjson10_deserializeOpEnableKinesisStreamingDestination) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpEnableKinesisStreamingDestination) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", 
out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorEnableKinesisStreamingDestination(response, &metadata) + } + output := &EnableKinesisStreamingDestinationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentEnableKinesisStreamingDestinationOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorEnableKinesisStreamingDestination(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("ResourceInUseException", errorCode): + return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpExecuteStatement struct { +} + +func (*awsAwsjson10_deserializeOpExecuteStatement) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpExecuteStatement) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next 
middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorExecuteStatement(response, &metadata) + } + output := &ExecuteStatementOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentExecuteStatementOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorExecuteStatement(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ConditionalCheckFailedException", errorCode): + return awsAwsjson10_deserializeErrorConditionalCheckFailedException(response, errorBody) + + case strings.EqualFold("DuplicateItemException", errorCode): + return awsAwsjson10_deserializeErrorDuplicateItemException(response, errorBody) + + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("ItemCollectionSizeLimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorItemCollectionSizeLimitExceededException(response, errorBody) + + case 
strings.EqualFold("ProvisionedThroughputExceededException", errorCode): + return awsAwsjson10_deserializeErrorProvisionedThroughputExceededException(response, errorBody) + + case strings.EqualFold("RequestLimitExceeded", errorCode): + return awsAwsjson10_deserializeErrorRequestLimitExceeded(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("TransactionConflictException", errorCode): + return awsAwsjson10_deserializeErrorTransactionConflictException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpExecuteTransaction struct { +} + +func (*awsAwsjson10_deserializeOpExecuteTransaction) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpExecuteTransaction) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorExecuteTransaction(response, &metadata) + } + output := &ExecuteTransactionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentExecuteTransactionOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorExecuteTransaction(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + 
decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("IdempotentParameterMismatchException", errorCode): + return awsAwsjson10_deserializeErrorIdempotentParameterMismatchException(response, errorBody) + + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("ProvisionedThroughputExceededException", errorCode): + return awsAwsjson10_deserializeErrorProvisionedThroughputExceededException(response, errorBody) + + case strings.EqualFold("RequestLimitExceeded", errorCode): + return awsAwsjson10_deserializeErrorRequestLimitExceeded(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("TransactionCanceledException", errorCode): + return awsAwsjson10_deserializeErrorTransactionCanceledException(response, errorBody) + + case strings.EqualFold("TransactionInProgressException", errorCode): + return awsAwsjson10_deserializeErrorTransactionInProgressException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpExportTableToPointInTime struct { +} + +func (*awsAwsjson10_deserializeOpExportTableToPointInTime) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpExportTableToPointInTime) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorExportTableToPointInTime(response, &metadata) + } + output := &ExportTableToPointInTimeOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, 
metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentExportTableToPointInTimeOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorExportTableToPointInTime(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ExportConflictException", errorCode): + return awsAwsjson10_deserializeErrorExportConflictException(response, errorBody) + + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidExportTimeException", errorCode): + return awsAwsjson10_deserializeErrorInvalidExportTimeException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("PointInTimeRecoveryUnavailableException", errorCode): + return awsAwsjson10_deserializeErrorPointInTimeRecoveryUnavailableException(response, errorBody) + + case strings.EqualFold("TableNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorTableNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpGetItem struct { +} + +func (*awsAwsjson10_deserializeOpGetItem) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpGetItem) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode 
>= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorGetItem(response, &metadata) + } + output := &GetItemOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentGetItemOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorGetItem(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("ProvisionedThroughputExceededException", errorCode): + return awsAwsjson10_deserializeErrorProvisionedThroughputExceededException(response, errorBody) + + case strings.EqualFold("RequestLimitExceeded", errorCode): + return awsAwsjson10_deserializeErrorRequestLimitExceeded(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpGetResourcePolicy struct { +} + +func (*awsAwsjson10_deserializeOpGetResourcePolicy) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpGetResourcePolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next 
middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorGetResourcePolicy(response, &metadata) + } + output := &GetResourcePolicyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentGetResourcePolicyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorGetResourcePolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("PolicyNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorPolicyNotFoundException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, 
+ } + return genericError + + } +} + +type awsAwsjson10_deserializeOpImportTable struct { +} + +func (*awsAwsjson10_deserializeOpImportTable) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpImportTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorImportTable(response, &metadata) + } + output := &ImportTableOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentImportTableOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorImportTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ImportConflictException", errorCode): + return awsAwsjson10_deserializeErrorImportConflictException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("ResourceInUseException", errorCode): + return 
awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpListBackups struct { +} + +func (*awsAwsjson10_deserializeOpListBackups) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpListBackups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorListBackups(response, &metadata) + } + output := &ListBackupsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentListBackupsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorListBackups(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return 
awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpListContributorInsights struct { +} + +func (*awsAwsjson10_deserializeOpListContributorInsights) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpListContributorInsights) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorListContributorInsights(response, &metadata) + } + output := &ListContributorInsightsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentListContributorInsightsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorListContributorInsights(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, 
errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpListExports struct { +} + +func (*awsAwsjson10_deserializeOpListExports) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpListExports) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorListExports(response, &metadata) + } + output := &ListExportsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentListExportsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorListExports(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, 
errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpListGlobalTables struct { +} + +func (*awsAwsjson10_deserializeOpListGlobalTables) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpListGlobalTables) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorListGlobalTables(response, &metadata) + } + output := &ListGlobalTablesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentListGlobalTablesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorListGlobalTables(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return 
awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpListImports struct { +} + +func (*awsAwsjson10_deserializeOpListImports) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpListImports) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorListImports(response, &metadata) + } + output := &ListImportsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentListImportsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorListImports(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("LimitExceededException", errorCode): + return 
awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpListTables struct { +} + +func (*awsAwsjson10_deserializeOpListTables) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpListTables) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorListTables(response, &metadata) + } + output := &ListTablesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentListTablesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorListTables(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return 
awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpListTagsOfResource struct { +} + +func (*awsAwsjson10_deserializeOpListTagsOfResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpListTagsOfResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorListTagsOfResource(response, &metadata) + } + output := &ListTagsOfResourceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentListTagsOfResourceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorListTagsOfResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case 
strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpPutItem struct { +} + +func (*awsAwsjson10_deserializeOpPutItem) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpPutItem) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorPutItem(response, &metadata) + } + output := &PutItemOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentPutItemOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorPutItem(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case 
strings.EqualFold("ConditionalCheckFailedException", errorCode): + return awsAwsjson10_deserializeErrorConditionalCheckFailedException(response, errorBody) + + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("ItemCollectionSizeLimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorItemCollectionSizeLimitExceededException(response, errorBody) + + case strings.EqualFold("ProvisionedThroughputExceededException", errorCode): + return awsAwsjson10_deserializeErrorProvisionedThroughputExceededException(response, errorBody) + + case strings.EqualFold("ReplicatedWriteConflictException", errorCode): + return awsAwsjson10_deserializeErrorReplicatedWriteConflictException(response, errorBody) + + case strings.EqualFold("RequestLimitExceeded", errorCode): + return awsAwsjson10_deserializeErrorRequestLimitExceeded(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("TransactionConflictException", errorCode): + return awsAwsjson10_deserializeErrorTransactionConflictException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpPutResourcePolicy struct { +} + +func (*awsAwsjson10_deserializeOpPutResourcePolicy) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpPutResourcePolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorPutResourcePolicy(response, &metadata) + } + output := &PutResourcePolicyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentPutResourcePolicyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode 
response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorPutResourcePolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("PolicyNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorPolicyNotFoundException(response, errorBody) + + case strings.EqualFold("ResourceInUseException", errorCode): + return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpQuery struct { +} + +func (*awsAwsjson10_deserializeOpQuery) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpQuery) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorQuery(response, &metadata) + } + output := &QueryOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + 
decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentQueryOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorQuery(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("ProvisionedThroughputExceededException", errorCode): + return awsAwsjson10_deserializeErrorProvisionedThroughputExceededException(response, errorBody) + + case strings.EqualFold("RequestLimitExceeded", errorCode): + return awsAwsjson10_deserializeErrorRequestLimitExceeded(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpRestoreTableFromBackup struct { +} + +func (*awsAwsjson10_deserializeOpRestoreTableFromBackup) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpRestoreTableFromBackup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, 
"client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorRestoreTableFromBackup(response, &metadata) + } + output := &RestoreTableFromBackupOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentRestoreTableFromBackupOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorRestoreTableFromBackup(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("BackupInUseException", errorCode): + return awsAwsjson10_deserializeErrorBackupInUseException(response, errorBody) + + case strings.EqualFold("BackupNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorBackupNotFoundException(response, errorBody) + + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("TableAlreadyExistsException", errorCode): + return awsAwsjson10_deserializeErrorTableAlreadyExistsException(response, errorBody) + + case strings.EqualFold("TableInUseException", errorCode): + return 
awsAwsjson10_deserializeErrorTableInUseException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpRestoreTableToPointInTime struct { +} + +func (*awsAwsjson10_deserializeOpRestoreTableToPointInTime) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpRestoreTableToPointInTime) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorRestoreTableToPointInTime(response, &metadata) + } + output := &RestoreTableToPointInTimeOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentRestoreTableToPointInTimeOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorRestoreTableToPointInTime(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, 
errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("InvalidRestoreTimeException", errorCode): + return awsAwsjson10_deserializeErrorInvalidRestoreTimeException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("PointInTimeRecoveryUnavailableException", errorCode): + return awsAwsjson10_deserializeErrorPointInTimeRecoveryUnavailableException(response, errorBody) + + case strings.EqualFold("TableAlreadyExistsException", errorCode): + return awsAwsjson10_deserializeErrorTableAlreadyExistsException(response, errorBody) + + case strings.EqualFold("TableInUseException", errorCode): + return awsAwsjson10_deserializeErrorTableInUseException(response, errorBody) + + case strings.EqualFold("TableNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorTableNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpScan struct { +} + +func (*awsAwsjson10_deserializeOpScan) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpScan) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorScan(response, &metadata) + } + output := &ScanOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentScanOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorScan(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + 
headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("ProvisionedThroughputExceededException", errorCode): + return awsAwsjson10_deserializeErrorProvisionedThroughputExceededException(response, errorBody) + + case strings.EqualFold("RequestLimitExceeded", errorCode): + return awsAwsjson10_deserializeErrorRequestLimitExceeded(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpTagResource struct { +} + +func (*awsAwsjson10_deserializeOpTagResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpTagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorTagResource(response, &metadata) + } + output := &TagResourceOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorTagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := 
response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("ResourceInUseException", errorCode): + return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpTransactGetItems struct { +} + +func (*awsAwsjson10_deserializeOpTransactGetItems) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpTransactGetItems) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorTransactGetItems(response, &metadata) + } + output := &TransactGetItemsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentTransactGetItemsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, 
metadata, err +} + +func awsAwsjson10_deserializeOpErrorTransactGetItems(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("ProvisionedThroughputExceededException", errorCode): + return awsAwsjson10_deserializeErrorProvisionedThroughputExceededException(response, errorBody) + + case strings.EqualFold("RequestLimitExceeded", errorCode): + return awsAwsjson10_deserializeErrorRequestLimitExceeded(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("TransactionCanceledException", errorCode): + return awsAwsjson10_deserializeErrorTransactionCanceledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpTransactWriteItems struct { +} + +func (*awsAwsjson10_deserializeOpTransactWriteItems) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpTransactWriteItems) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorTransactWriteItems(response, &metadata) + } + output := &TransactWriteItemsOutput{} + out.Result = output + + var buff [1024]byte + 
ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentTransactWriteItemsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorTransactWriteItems(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("IdempotentParameterMismatchException", errorCode): + return awsAwsjson10_deserializeErrorIdempotentParameterMismatchException(response, errorBody) + + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("ProvisionedThroughputExceededException", errorCode): + return awsAwsjson10_deserializeErrorProvisionedThroughputExceededException(response, errorBody) + + case strings.EqualFold("RequestLimitExceeded", errorCode): + return awsAwsjson10_deserializeErrorRequestLimitExceeded(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("TransactionCanceledException", errorCode): + return awsAwsjson10_deserializeErrorTransactionCanceledException(response, errorBody) + + case strings.EqualFold("TransactionInProgressException", errorCode): + return awsAwsjson10_deserializeErrorTransactionInProgressException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return 
genericError + + } +} + +type awsAwsjson10_deserializeOpUntagResource struct { +} + +func (*awsAwsjson10_deserializeOpUntagResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpUntagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorUntagResource(response, &metadata) + } + output := &UntagResourceOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorUntagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("ResourceInUseException", errorCode): + return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpUpdateContinuousBackups struct { +} + +func (*awsAwsjson10_deserializeOpUpdateContinuousBackups) ID() string { + 
return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpUpdateContinuousBackups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorUpdateContinuousBackups(response, &metadata) + } + output := &UpdateContinuousBackupsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentUpdateContinuousBackupsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorUpdateContinuousBackups(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ContinuousBackupsUnavailableException", errorCode): + return awsAwsjson10_deserializeErrorContinuousBackupsUnavailableException(response, errorBody) + + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case 
strings.EqualFold("TableNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorTableNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpUpdateContributorInsights struct { +} + +func (*awsAwsjson10_deserializeOpUpdateContributorInsights) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpUpdateContributorInsights) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorUpdateContributorInsights(response, &metadata) + } + output := &UpdateContributorInsightsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentUpdateContributorInsightsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorUpdateContributorInsights(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): 
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpUpdateGlobalTable struct { +} + +func (*awsAwsjson10_deserializeOpUpdateGlobalTable) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpUpdateGlobalTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorUpdateGlobalTable(response, &metadata) + } + output := &UpdateGlobalTableOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentUpdateGlobalTableOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorUpdateGlobalTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case 
strings.EqualFold("GlobalTableNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorGlobalTableNotFoundException(response, errorBody) + + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("ReplicaAlreadyExistsException", errorCode): + return awsAwsjson10_deserializeErrorReplicaAlreadyExistsException(response, errorBody) + + case strings.EqualFold("ReplicaNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorReplicaNotFoundException(response, errorBody) + + case strings.EqualFold("TableNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorTableNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpUpdateGlobalTableSettings struct { +} + +func (*awsAwsjson10_deserializeOpUpdateGlobalTableSettings) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpUpdateGlobalTableSettings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorUpdateGlobalTableSettings(response, &metadata) + } + output := &UpdateGlobalTableSettingsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentUpdateGlobalTableSettingsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorUpdateGlobalTableSettings(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := 
response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("GlobalTableNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorGlobalTableNotFoundException(response, errorBody) + + case strings.EqualFold("IndexNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorIndexNotFoundException(response, errorBody) + + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("ReplicaNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorReplicaNotFoundException(response, errorBody) + + case strings.EqualFold("ResourceInUseException", errorCode): + return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpUpdateItem struct { +} + +func (*awsAwsjson10_deserializeOpUpdateItem) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpUpdateItem) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorUpdateItem(response, &metadata) + } + output := &UpdateItemOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentUpdateItemOutput(&output, 
shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorUpdateItem(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ConditionalCheckFailedException", errorCode): + return awsAwsjson10_deserializeErrorConditionalCheckFailedException(response, errorBody) + + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("ItemCollectionSizeLimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorItemCollectionSizeLimitExceededException(response, errorBody) + + case strings.EqualFold("ProvisionedThroughputExceededException", errorCode): + return awsAwsjson10_deserializeErrorProvisionedThroughputExceededException(response, errorBody) + + case strings.EqualFold("ReplicatedWriteConflictException", errorCode): + return awsAwsjson10_deserializeErrorReplicatedWriteConflictException(response, errorBody) + + case strings.EqualFold("RequestLimitExceeded", errorCode): + return awsAwsjson10_deserializeErrorRequestLimitExceeded(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("TransactionConflictException", errorCode): + return awsAwsjson10_deserializeErrorTransactionConflictException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpUpdateKinesisStreamingDestination struct { +} + +func (*awsAwsjson10_deserializeOpUpdateKinesisStreamingDestination) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpUpdateKinesisStreamingDestination) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next 
middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorUpdateKinesisStreamingDestination(response, &metadata) + } + output := &UpdateKinesisStreamingDestinationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentUpdateKinesisStreamingDestinationOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorUpdateKinesisStreamingDestination(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("ResourceInUseException", errorCode): + return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody) + + case 
strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpUpdateTable struct { +} + +func (*awsAwsjson10_deserializeOpUpdateTable) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpUpdateTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorUpdateTable(response, &metadata) + } + output := &UpdateTableOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentUpdateTableOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorUpdateTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case 
strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("ResourceInUseException", errorCode): + return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpUpdateTableReplicaAutoScaling struct { +} + +func (*awsAwsjson10_deserializeOpUpdateTableReplicaAutoScaling) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpUpdateTableReplicaAutoScaling) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorUpdateTableReplicaAutoScaling(response, &metadata) + } + output := &UpdateTableReplicaAutoScalingOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentUpdateTableReplicaAutoScalingOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorUpdateTableReplicaAutoScaling(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + 
io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("ResourceInUseException", errorCode): + return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson10_deserializeOpUpdateTimeToLive struct { +} + +func (*awsAwsjson10_deserializeOpUpdateTimeToLive) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpUpdateTimeToLive) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorUpdateTimeToLive(response, &metadata) + } + output := &UpdateTimeToLiveOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentUpdateTimeToLiveOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorUpdateTimeToLive(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + 
headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("ResourceInUseException", errorCode): + return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsAwsjson10_deserializeErrorBackupInUseException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.BackupInUseException{} + err := awsAwsjson10_deserializeDocumentBackupInUseException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson10_deserializeErrorBackupNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.BackupNotFoundException{} + err := awsAwsjson10_deserializeDocumentBackupNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: 
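The modeled exceptions matched in these switches reach the caller as typed errors, so client code selects them with `errors.As`; unmodeled codes fall through the `default` branch as `*smithy.GenericAPIError`. A minimal caller-side sketch (not part of the patch; the helper name `handleUpdateErr` is illustrative):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
	"github.com/aws/smithy-go"
)

// handleUpdateErr matches the typed errors produced by the generated
// deserializers above.
func handleUpdateErr(err error) {
	var rnf *types.ResourceNotFoundException
	if errors.As(err, &rnf) {
		fmt.Println("table not found:", rnf.ErrorMessage())
		return
	}
	// Any other modeled or unmodeled service error satisfies smithy.APIError.
	var apiErr smithy.APIError
	if errors.As(err, &apiErr) {
		fmt.Println("api error:", apiErr.ErrorCode(), apiErr.ErrorMessage())
	}
}
```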
+
+func awsAwsjson10_deserializeErrorBackupInUseException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.BackupInUseException{}
+	err := awsAwsjson10_deserializeDocumentBackupInUseException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorBackupNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.BackupNotFoundException{}
+	err := awsAwsjson10_deserializeDocumentBackupNotFoundException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorConditionalCheckFailedException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.ConditionalCheckFailedException{}
+	err := awsAwsjson10_deserializeDocumentConditionalCheckFailedException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorContinuousBackupsUnavailableException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.ContinuousBackupsUnavailableException{}
+	err := awsAwsjson10_deserializeDocumentContinuousBackupsUnavailableException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorDuplicateItemException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.DuplicateItemException{}
+	err := awsAwsjson10_deserializeDocumentDuplicateItemException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorExportConflictException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.ExportConflictException{}
+	err := awsAwsjson10_deserializeDocumentExportConflictException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorExportNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.ExportNotFoundException{}
+	err := awsAwsjson10_deserializeDocumentExportNotFoundException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorGlobalTableAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.GlobalTableAlreadyExistsException{}
+	err := awsAwsjson10_deserializeDocumentGlobalTableAlreadyExistsException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorGlobalTableNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.GlobalTableNotFoundException{}
+	err := awsAwsjson10_deserializeDocumentGlobalTableNotFoundException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorIdempotentParameterMismatchException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.IdempotentParameterMismatchException{}
+	err := awsAwsjson10_deserializeDocumentIdempotentParameterMismatchException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorImportConflictException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.ImportConflictException{}
+	err := awsAwsjson10_deserializeDocumentImportConflictException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorImportNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.ImportNotFoundException{}
+	err := awsAwsjson10_deserializeDocumentImportNotFoundException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorIndexNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.IndexNotFoundException{}
+	err := awsAwsjson10_deserializeDocumentIndexNotFoundException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorInternalServerError(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.InternalServerError{}
+	err := awsAwsjson10_deserializeDocumentInternalServerError(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorInvalidEndpointException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.InvalidEndpointException{}
+	err := awsAwsjson10_deserializeDocumentInvalidEndpointException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorInvalidExportTimeException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.InvalidExportTimeException{}
+	err := awsAwsjson10_deserializeDocumentInvalidExportTimeException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorInvalidRestoreTimeException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.InvalidRestoreTimeException{}
+	err := awsAwsjson10_deserializeDocumentInvalidRestoreTimeException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorItemCollectionSizeLimitExceededException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.ItemCollectionSizeLimitExceededException{}
+	err := awsAwsjson10_deserializeDocumentItemCollectionSizeLimitExceededException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorLimitExceededException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.LimitExceededException{}
+	err := awsAwsjson10_deserializeDocumentLimitExceededException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorPointInTimeRecoveryUnavailableException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.PointInTimeRecoveryUnavailableException{}
+	err := awsAwsjson10_deserializeDocumentPointInTimeRecoveryUnavailableException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorPolicyNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.PolicyNotFoundException{}
+	err := awsAwsjson10_deserializeDocumentPolicyNotFoundException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorProvisionedThroughputExceededException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.ProvisionedThroughputExceededException{}
+	err := awsAwsjson10_deserializeDocumentProvisionedThroughputExceededException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorReplicaAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.ReplicaAlreadyExistsException{}
+	err := awsAwsjson10_deserializeDocumentReplicaAlreadyExistsException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorReplicaNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.ReplicaNotFoundException{}
+	err := awsAwsjson10_deserializeDocumentReplicaNotFoundException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorReplicatedWriteConflictException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.ReplicatedWriteConflictException{}
+	err := awsAwsjson10_deserializeDocumentReplicatedWriteConflictException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorRequestLimitExceeded(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.RequestLimitExceeded{}
+	err := awsAwsjson10_deserializeDocumentRequestLimitExceeded(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorResourceInUseException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.ResourceInUseException{}
+	err := awsAwsjson10_deserializeDocumentResourceInUseException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorResourceNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.ResourceNotFoundException{}
+	err := awsAwsjson10_deserializeDocumentResourceNotFoundException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorTableAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.TableAlreadyExistsException{}
+	err := awsAwsjson10_deserializeDocumentTableAlreadyExistsException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorTableInUseException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.TableInUseException{}
+	err := awsAwsjson10_deserializeDocumentTableInUseException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorTableNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.TableNotFoundException{}
+	err := awsAwsjson10_deserializeDocumentTableNotFoundException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorThrottlingException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.ThrottlingException{}
+	err := awsAwsjson10_deserializeDocumentThrottlingException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorTransactionCanceledException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.TransactionCanceledException{}
+	err := awsAwsjson10_deserializeDocumentTransactionCanceledException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorTransactionConflictException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.TransactionConflictException{}
+	err := awsAwsjson10_deserializeDocumentTransactionConflictException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeErrorTransactionInProgressException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.TransactionInProgressException{}
+	err := awsAwsjson10_deserializeDocumentTransactionInProgressException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson10_deserializeDocumentArchivalSummary(v **types.ArchivalSummary, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ArchivalSummary
+	if *v == nil {
+		sv = &types.ArchivalSummary{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "ArchivalBackupArn":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected BackupArn to be of type string, got %T instead", value)
+				}
+				sv.ArchivalBackupArn = ptr.String(jtv)
+			}
+
+		case "ArchivalDateTime":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.ArchivalDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+				default:
+					return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		case "ArchivalReason":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ArchivalReason to be of type string, got %T instead", value)
+				}
+				sv.ArchivalReason = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentAttributeDefinition(v **types.AttributeDefinition, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.AttributeDefinition
+	if *v == nil {
+		sv = &types.AttributeDefinition{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "AttributeName":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected KeySchemaAttributeName to be of type string, got %T instead", value)
+				}
+				sv.AttributeName = ptr.String(jtv)
+			}
+
+		case "AttributeType":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ScalarAttributeType to be of type string, got %T instead", value)
+				}
+				sv.AttributeType = types.ScalarAttributeType(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentAttributeDefinitions(v *[]types.AttributeDefinition, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.AttributeDefinition
+	if *v == nil {
+		cv = []types.AttributeDefinition{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.AttributeDefinition
+		destAddr := &col
+		if err := awsAwsjson10_deserializeDocumentAttributeDefinition(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentAttributeMap(v *map[string]types.AttributeValue, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var mv map[string]types.AttributeValue
+	if *v == nil {
+		mv = map[string]types.AttributeValue{}
+	} else {
+		mv = *v
+	}
+
+	for key, value := range shape {
+		var parsedVal types.AttributeValue
+		mapVar := parsedVal
+		if err := awsAwsjson10_deserializeDocumentAttributeValue(&mapVar, value); err != nil {
+			return err
+		}
+		parsedVal = mapVar
+		mv[key] = parsedVal
+
+	}
+	*v = mv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentAttributeNameList(v *[]string, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []string
+	if *v == nil {
+		cv = []string{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col string
+		if value != nil {
+			jtv, ok := value.(string)
+			if !ok {
+				return fmt.Errorf("expected AttributeName to be of type string, got %T instead", value)
+			}
+			col = jtv
+		}
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentAttributeValue(v *types.AttributeValue, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var uv types.AttributeValue
+loop:
+	for key, value := range shape {
+		if value == nil {
+			continue
+		}
+		switch key {
+		case "B":
+			var mv []byte
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected BinaryAttributeValue to be []byte, got %T instead", value)
+				}
+				dv, err := base64.StdEncoding.DecodeString(jtv)
+				if err != nil {
+					return fmt.Errorf("failed to base64 decode BinaryAttributeValue, %w", err)
+				}
+				mv = dv
+			}
+			uv = &types.AttributeValueMemberB{Value: mv}
+			break loop
+
+		case "BOOL":
+			var mv bool
+			if value != nil {
+				jtv, ok := value.(bool)
+				if !ok {
+					return fmt.Errorf("expected BooleanAttributeValue to be of type *bool, got %T instead", value)
+				}
+				mv = jtv
+			}
+			uv = &types.AttributeValueMemberBOOL{Value: mv}
+			break loop
+
+		case "BS":
+			var mv [][]byte
+			if err := awsAwsjson10_deserializeDocumentBinarySetAttributeValue(&mv, value); err != nil {
+				return err
+			}
+			uv = &types.AttributeValueMemberBS{Value: mv}
+			break loop
+
+		case "L":
+			var mv []types.AttributeValue
+			if err := awsAwsjson10_deserializeDocumentListAttributeValue(&mv, value); err != nil {
+				return err
+			}
+			uv = &types.AttributeValueMemberL{Value: mv}
+			break loop
+
+		case "M":
+			var mv map[string]types.AttributeValue
+			if err := awsAwsjson10_deserializeDocumentMapAttributeValue(&mv, value); err != nil {
+				return err
+			}
+			uv = &types.AttributeValueMemberM{Value: mv}
+			break loop
+
+		case "N":
+			var mv string
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected NumberAttributeValue to be of type string, got %T instead", value)
+				}
+				mv = jtv
+			}
+			uv = &types.AttributeValueMemberN{Value: mv}
+			break loop
+
+		case "NS":
+			var mv []string
+			if err := awsAwsjson10_deserializeDocumentNumberSetAttributeValue(&mv, value); err != nil {
+				return err
+			}
+			uv = &types.AttributeValueMemberNS{Value: mv}
+			break loop
+
+		case "NULL":
+			var mv bool
+			if value != nil {
+				jtv, ok := value.(bool)
+				if !ok {
+					return fmt.Errorf("expected NullAttributeValue to be of type *bool, got %T instead", value)
+				}
+				mv = jtv
+			}
+			uv = &types.AttributeValueMemberNULL{Value: mv}
+			break loop
+
+		case "S":
+			var mv string
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected StringAttributeValue to be of type string, got %T instead", value)
+				}
+				mv = jtv
+			}
+			uv = &types.AttributeValueMemberS{Value: mv}
+			break loop
+
+		case "SS":
+			var mv []string
+			if err := awsAwsjson10_deserializeDocumentStringSetAttributeValue(&mv, value); err != nil {
+				return err
+			}
+			uv = &types.AttributeValueMemberSS{Value: mv}
+			break loop
+
+		default:
+			uv = &types.UnknownUnionMember{Tag: key}
+			break loop
+
+		}
+	}
+	*v = uv
+	return nil
+}
"TargetTrackingScalingPolicyConfiguration": + if err := awsAwsjson10_deserializeDocumentAutoScalingTargetTrackingScalingPolicyConfigurationDescription(&sv.TargetTrackingScalingPolicyConfiguration, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentAutoScalingPolicyDescriptionList(v *[]types.AutoScalingPolicyDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.AutoScalingPolicyDescription + if *v == nil { + cv = []types.AutoScalingPolicyDescription{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.AutoScalingPolicyDescription + destAddr := &col + if err := awsAwsjson10_deserializeDocumentAutoScalingPolicyDescription(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentAutoScalingSettingsDescription(v **types.AutoScalingSettingsDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AutoScalingSettingsDescription + if *v == nil { + sv = &types.AutoScalingSettingsDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AutoScalingDisabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanObject to be of type *bool, got %T instead", value) + } + sv.AutoScalingDisabled = ptr.Bool(jtv) + } + + case "AutoScalingRoleArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.AutoScalingRoleArn = ptr.String(jtv) + } + + case "MaximumUnits": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.MaximumUnits = ptr.Int64(i64) + } + + case "MinimumUnits": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.MinimumUnits = ptr.Int64(i64) + } + + case "ScalingPolicies": + if err := awsAwsjson10_deserializeDocumentAutoScalingPolicyDescriptionList(&sv.ScalingPolicies, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentAutoScalingTargetTrackingScalingPolicyConfigurationDescription(v **types.AutoScalingTargetTrackingScalingPolicyConfigurationDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AutoScalingTargetTrackingScalingPolicyConfigurationDescription + if *v == nil { + sv = &types.AutoScalingTargetTrackingScalingPolicyConfigurationDescription{} + } else { + sv = *v + } + + for key, value := 
range shape { + switch key { + case "DisableScaleIn": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanObject to be of type *bool, got %T instead", value) + } + sv.DisableScaleIn = ptr.Bool(jtv) + } + + case "ScaleInCooldown": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected IntegerObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ScaleInCooldown = ptr.Int32(int32(i64)) + } + + case "ScaleOutCooldown": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected IntegerObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ScaleOutCooldown = ptr.Int32(int32(i64)) + } + + case "TargetValue": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.TargetValue = ptr.Float64(f64) + + case string: + var f64 float64 + switch { + case strings.EqualFold(jtv, "NaN"): + f64 = math.NaN() + + case strings.EqualFold(jtv, "Infinity"): + f64 = math.Inf(1) + + case strings.EqualFold(jtv, "-Infinity"): + f64 = math.Inf(-1) + + default: + return fmt.Errorf("unknown JSON number value: %s", jtv) + + } + sv.TargetValue = ptr.Float64(f64) + + default: + return fmt.Errorf("expected DoubleObject to be a JSON Number, got %T instead", value) + + } + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentBackupDescription(v **types.BackupDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.BackupDescription + if *v == nil { + sv = &types.BackupDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "BackupDetails": + if err := awsAwsjson10_deserializeDocumentBackupDetails(&sv.BackupDetails, value); err != nil { + return err + } + + case "SourceTableDetails": + if err := awsAwsjson10_deserializeDocumentSourceTableDetails(&sv.SourceTableDetails, value); err != nil { + return err + } + + case "SourceTableFeatureDetails": + if err := awsAwsjson10_deserializeDocumentSourceTableFeatureDetails(&sv.SourceTableFeatureDetails, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentBackupDetails(v **types.BackupDetails, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.BackupDetails + if *v == nil { + sv = &types.BackupDetails{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "BackupArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected BackupArn to be of type string, got %T instead", value) + } + sv.BackupArn = ptr.String(jtv) + } + + case "BackupCreationDateTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.BackupCreationDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + 
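The `TargetValue` branch above handles a protocol quirk: AWS JSON serializes non-finite doubles as the strings "NaN", "Infinity" and "-Infinity", so both `json.Number` and string shapes must be accepted. A minimal standalone sketch of the same parsing rule (`parseDouble` is an illustrative name, not SDK API):

```go
package main

import (
	"encoding/json"
	"fmt"
	"math"
	"strings"
)

// parseDouble mirrors the TargetValue handling: finite values arrive as
// json.Number, non-finite values as well-known strings.
func parseDouble(value interface{}) (float64, error) {
	switch jtv := value.(type) {
	case json.Number:
		return jtv.Float64()
	case string:
		switch {
		case strings.EqualFold(jtv, "NaN"):
			return math.NaN(), nil
		case strings.EqualFold(jtv, "Infinity"):
			return math.Inf(1), nil
		case strings.EqualFold(jtv, "-Infinity"):
			return math.Inf(-1), nil
		}
	}
	return 0, fmt.Errorf("unexpected JSON number value: %v", value)
}
```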
return fmt.Errorf("expected BackupCreationDateTime to be a JSON Number, got %T instead", value) + + } + } + + case "BackupExpiryDateTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.BackupExpiryDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "BackupName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected BackupName to be of type string, got %T instead", value) + } + sv.BackupName = ptr.String(jtv) + } + + case "BackupSizeBytes": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BackupSizeBytes to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.BackupSizeBytes = ptr.Int64(i64) + } + + case "BackupStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected BackupStatus to be of type string, got %T instead", value) + } + sv.BackupStatus = types.BackupStatus(jtv) + } + + case "BackupType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected BackupType to be of type string, got %T instead", value) + } + sv.BackupType = types.BackupType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentBackupInUseException(v **types.BackupInUseException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.BackupInUseException + if *v == nil { + sv = &types.BackupInUseException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentBackupNotFoundException(v **types.BackupNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.BackupNotFoundException + if *v == nil { + sv = &types.BackupNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentBackupSummaries(v *[]types.BackupSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.BackupSummary + if *v == nil { + cv = []types.BackupSummary{} + } else { + cv = *v + } + + for _, value := range shape { + var col 
types.BackupSummary + destAddr := &col + if err := awsAwsjson10_deserializeDocumentBackupSummary(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentBackupSummary(v **types.BackupSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.BackupSummary + if *v == nil { + sv = &types.BackupSummary{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "BackupArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected BackupArn to be of type string, got %T instead", value) + } + sv.BackupArn = ptr.String(jtv) + } + + case "BackupCreationDateTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.BackupCreationDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected BackupCreationDateTime to be a JSON Number, got %T instead", value) + + } + } + + case "BackupExpiryDateTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.BackupExpiryDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "BackupName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected BackupName to be of type string, got %T instead", value) + } + sv.BackupName = ptr.String(jtv) + } + + case "BackupSizeBytes": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BackupSizeBytes to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.BackupSizeBytes = ptr.Int64(i64) + } + + case "BackupStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected BackupStatus to be of type string, got %T instead", value) + } + sv.BackupStatus = types.BackupStatus(jtv) + } + + case "BackupType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected BackupType to be of type string, got %T instead", value) + } + sv.BackupType = types.BackupType(jtv) + } + + case "TableArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableArn to be of type string, got %T instead", value) + } + sv.TableArn = ptr.String(jtv) + } + + case "TableId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableId to be of type string, got %T instead", value) + } + sv.TableId = ptr.String(jtv) + } + + case "TableName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableName to be of type string, got %T instead", value) + } + sv.TableName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentBatchGetRequestMap(v *map[string]types.KeysAndAttributes, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return 
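Timestamps such as `BackupCreationDateTime` arrive as fractional epoch seconds and are converted with `smithytime.ParseEpochSeconds`. A minimal sketch of the same conversion in isolation (`toTime` is an illustrative name):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	smithytime "github.com/aws/smithy-go/time"
)

// toTime mirrors the date handling above: a json.Number holding
// fractional epoch seconds becomes a time.Time.
func toTime(n json.Number) (time.Time, error) {
	f64, err := n.Float64()
	if err != nil {
		return time.Time{}, err
	}
	return smithytime.ParseEpochSeconds(f64), nil
}

func main() {
	t, _ := toTime(json.Number("1700000000.5"))
	fmt.Println(t.UTC())
}
```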
fmt.Errorf("unexpected JSON type %v", value) + } + + var mv map[string]types.KeysAndAttributes + if *v == nil { + mv = map[string]types.KeysAndAttributes{} + } else { + mv = *v + } + + for key, value := range shape { + var parsedVal types.KeysAndAttributes + mapVar := parsedVal + destAddr := &mapVar + if err := awsAwsjson10_deserializeDocumentKeysAndAttributes(&destAddr, value); err != nil { + return err + } + parsedVal = *destAddr + mv[key] = parsedVal + + } + *v = mv + return nil +} + +func awsAwsjson10_deserializeDocumentBatchGetResponseMap(v *map[string][]map[string]types.AttributeValue, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var mv map[string][]map[string]types.AttributeValue + if *v == nil { + mv = map[string][]map[string]types.AttributeValue{} + } else { + mv = *v + } + + for key, value := range shape { + var parsedVal []map[string]types.AttributeValue + mapVar := parsedVal + if err := awsAwsjson10_deserializeDocumentItemList(&mapVar, value); err != nil { + return err + } + parsedVal = mapVar + mv[key] = parsedVal + + } + *v = mv + return nil +} + +func awsAwsjson10_deserializeDocumentBatchStatementError(v **types.BatchStatementError, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.BatchStatementError + if *v == nil { + sv = &types.BatchStatementError{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Code": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected BatchStatementErrorCodeEnum to be of type string, got %T instead", value) + } + sv.Code = types.BatchStatementErrorCodeEnum(jtv) + } + + case "Item": + if err := awsAwsjson10_deserializeDocumentAttributeMap(&sv.Item, value); err != nil { + return err + } + + case "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentBatchStatementResponse(v **types.BatchStatementResponse, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.BatchStatementResponse + if *v == nil { + sv = &types.BatchStatementResponse{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Error": + if err := awsAwsjson10_deserializeDocumentBatchStatementError(&sv.Error, value); err != nil { + return err + } + + case "Item": + if err := awsAwsjson10_deserializeDocumentAttributeMap(&sv.Item, value); err != nil { + return err + } + + case "TableName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableName to be of type string, got %T instead", value) + } + sv.TableName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentBatchWriteItemRequestMap(v 
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var mv map[string][]types.WriteRequest
+	if *v == nil {
+		mv = map[string][]types.WriteRequest{}
+	} else {
+		mv = *v
+	}
+
+	for key, value := range shape {
+		var parsedVal []types.WriteRequest
+		mapVar := parsedVal
+		if err := awsAwsjson10_deserializeDocumentWriteRequests(&mapVar, value); err != nil {
+			return err
+		}
+		parsedVal = mapVar
+		mv[key] = parsedVal
+
+	}
+	*v = mv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentBillingModeSummary(v **types.BillingModeSummary, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.BillingModeSummary
+	if *v == nil {
+		sv = &types.BillingModeSummary{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "BillingMode":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected BillingMode to be of type string, got %T instead", value)
+				}
+				sv.BillingMode = types.BillingMode(jtv)
+			}
+
+		case "LastUpdateToPayPerRequestDateTime":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.LastUpdateToPayPerRequestDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+				default:
+					return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentBinarySetAttributeValue(v *[][]byte, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv [][]byte
+	if *v == nil {
+		cv = [][]byte{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col []byte
+		if value != nil {
+			jtv, ok := value.(string)
+			if !ok {
+				return fmt.Errorf("expected BinaryAttributeValue to be []byte, got %T instead", value)
+			}
+			dv, err := base64.StdEncoding.DecodeString(jtv)
+			if err != nil {
+				return fmt.Errorf("failed to base64 decode BinaryAttributeValue, %w", err)
+			}
+			col = dv
+		}
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentCancellationReason(v **types.CancellationReason, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.CancellationReason
+	if *v == nil {
+		sv = &types.CancellationReason{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "Code":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Code to be of type string, got %T instead", value)
+				}
+				sv.Code = ptr.String(jtv)
+			}
+
+		case "Item":
+			if err := awsAwsjson10_deserializeDocumentAttributeMap(&sv.Item, value); err != nil {
+				return err
+			}
+
+		case "Message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentCancellationReasonList(v *[]types.CancellationReason, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.CancellationReason
+	if *v == nil {
+		cv = []types.CancellationReason{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.CancellationReason
+		destAddr := &col
+		if err := awsAwsjson10_deserializeDocumentCancellationReason(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentCapacity(v **types.Capacity, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.Capacity
+	if *v == nil {
+		sv = &types.Capacity{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "CapacityUnits":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.CapacityUnits = ptr.Float64(f64)
+
+				case string:
+					var f64 float64
+					switch {
+					case strings.EqualFold(jtv, "NaN"):
+						f64 = math.NaN()
+
+					case strings.EqualFold(jtv, "Infinity"):
+						f64 = math.Inf(1)
+
+					case strings.EqualFold(jtv, "-Infinity"):
+						f64 = math.Inf(-1)
+
+					default:
+						return fmt.Errorf("unknown JSON number value: %s", jtv)
+
+					}
+					sv.CapacityUnits = ptr.Float64(f64)
+
+				default:
+					return fmt.Errorf("expected ConsumedCapacityUnits to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		case "ReadCapacityUnits":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.ReadCapacityUnits = ptr.Float64(f64)
+
+				case string:
+					var f64 float64
+					switch {
+					case strings.EqualFold(jtv, "NaN"):
+						f64 = math.NaN()
+
+					case strings.EqualFold(jtv, "Infinity"):
+						f64 = math.Inf(1)
+
+					case strings.EqualFold(jtv, "-Infinity"):
+						f64 = math.Inf(-1)
+
+					default:
+						return fmt.Errorf("unknown JSON number value: %s", jtv)
+
+					}
+					sv.ReadCapacityUnits = ptr.Float64(f64)
+
+				default:
+					return fmt.Errorf("expected ConsumedCapacityUnits to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		case "WriteCapacityUnits":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.WriteCapacityUnits = ptr.Float64(f64)
+
+				case string:
+					var f64 float64
+					switch {
+					case strings.EqualFold(jtv, "NaN"):
+						f64 = math.NaN()
+
+					case strings.EqualFold(jtv, "Infinity"):
+						f64 = math.Inf(1)
+
+					case strings.EqualFold(jtv, "-Infinity"):
+						f64 = math.Inf(-1)
+
+					default:
+						return fmt.Errorf("unknown JSON number value: %s", jtv)
+
+					}
+					sv.WriteCapacityUnits = ptr.Float64(f64)
+
+				default:
+					return fmt.Errorf("expected ConsumedCapacityUnits to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentConditionalCheckFailedException(v **types.ConditionalCheckFailedException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ConditionalCheckFailedException
+	if *v == nil {
+		sv = &types.ConditionalCheckFailedException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "Item":
+			if err := awsAwsjson10_deserializeDocumentAttributeMap(&sv.Item, value); err != nil {
+				return err
+			}
+
+		case "message", "Message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentConsumedCapacity(v **types.ConsumedCapacity, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ConsumedCapacity
+	if *v == nil {
+		sv = &types.ConsumedCapacity{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "CapacityUnits":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.CapacityUnits = ptr.Float64(f64)
+
+				case string:
+					var f64 float64
+					switch {
+					case strings.EqualFold(jtv, "NaN"):
+						f64 = math.NaN()
+
+					case strings.EqualFold(jtv, "Infinity"):
+						f64 = math.Inf(1)
+
+					case strings.EqualFold(jtv, "-Infinity"):
+						f64 = math.Inf(-1)
+
+					default:
+						return fmt.Errorf("unknown JSON number value: %s", jtv)
+
+					}
+					sv.CapacityUnits = ptr.Float64(f64)
+
+				default:
+					return fmt.Errorf("expected ConsumedCapacityUnits to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		case "GlobalSecondaryIndexes":
+			if err := awsAwsjson10_deserializeDocumentSecondaryIndexesCapacityMap(&sv.GlobalSecondaryIndexes, value); err != nil {
+				return err
+			}
+
+		case "LocalSecondaryIndexes":
+			if err := awsAwsjson10_deserializeDocumentSecondaryIndexesCapacityMap(&sv.LocalSecondaryIndexes, value); err != nil {
+				return err
+			}
+
+		case "ReadCapacityUnits":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.ReadCapacityUnits = ptr.Float64(f64)
+
+				case string:
+					var f64 float64
+					switch {
+					case strings.EqualFold(jtv, "NaN"):
+						f64 = math.NaN()
+
+					case strings.EqualFold(jtv, "Infinity"):
+						f64 = math.Inf(1)
+
+					case strings.EqualFold(jtv, "-Infinity"):
+						f64 = math.Inf(-1)
+
+					default:
+						return fmt.Errorf("unknown JSON number value: %s", jtv)
+
+					}
+					sv.ReadCapacityUnits = ptr.Float64(f64)
+
+				default:
+					return fmt.Errorf("expected ConsumedCapacityUnits to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		case "Table":
+			if err := awsAwsjson10_deserializeDocumentCapacity(&sv.Table, value); err != nil {
+				return err
+			}
+
+		case "TableName":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected TableArn to be of type string, got %T instead", value)
+				}
+				sv.TableName = ptr.String(jtv)
+			}
+
+		case "WriteCapacityUnits":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.WriteCapacityUnits = ptr.Float64(f64)
+
+				case string:
+					var f64 float64
+					switch {
+					case strings.EqualFold(jtv, "NaN"):
+						f64 = math.NaN()
+
+					case strings.EqualFold(jtv, "Infinity"):
+						f64 = math.Inf(1)
+
+					case strings.EqualFold(jtv, "-Infinity"):
+						f64 = math.Inf(-1)
+
+					default:
+						return fmt.Errorf("unknown JSON number value: %s", jtv)
+
+					}
+					sv.WriteCapacityUnits = ptr.Float64(f64)
+
+				default:
+					return fmt.Errorf("expected ConsumedCapacityUnits to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentConsumedCapacityMultiple(v *[]types.ConsumedCapacity, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.ConsumedCapacity
+	if *v == nil {
+		cv = []types.ConsumedCapacity{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.ConsumedCapacity
+		destAddr := &col
+		if err := awsAwsjson10_deserializeDocumentConsumedCapacity(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentContinuousBackupsDescription(v **types.ContinuousBackupsDescription, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ContinuousBackupsDescription
+	if *v == nil {
+		sv = &types.ContinuousBackupsDescription{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "ContinuousBackupsStatus":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ContinuousBackupsStatus to be of type string, got %T instead", value)
+				}
+				sv.ContinuousBackupsStatus = types.ContinuousBackupsStatus(jtv)
+			}
+
+		case "PointInTimeRecoveryDescription":
+			if err := awsAwsjson10_deserializeDocumentPointInTimeRecoveryDescription(&sv.PointInTimeRecoveryDescription, value); err != nil {
+				return err
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentContinuousBackupsUnavailableException(v **types.ContinuousBackupsUnavailableException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ContinuousBackupsUnavailableException
+	if *v == nil {
+		sv = &types.ContinuousBackupsUnavailableException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message", "Message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentContributorInsightsRuleList(v *[]string, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []string
+	if *v == nil {
+		cv = []string{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col string
+		if value != nil {
+			jtv, ok := value.(string)
+			if !ok {
+				return fmt.Errorf("expected ContributorInsightsRule to be of type string, got %T instead", value)
+			}
+			col = jtv
+		}
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentContributorInsightsSummaries(v *[]types.ContributorInsightsSummary, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.ContributorInsightsSummary
+	if *v == nil {
+		cv = []types.ContributorInsightsSummary{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.ContributorInsightsSummary
+		destAddr := &col
+		if err := awsAwsjson10_deserializeDocumentContributorInsightsSummary(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentContributorInsightsSummary(v **types.ContributorInsightsSummary, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ContributorInsightsSummary
+	if *v == nil {
+		sv = &types.ContributorInsightsSummary{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "ContributorInsightsMode":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ContributorInsightsMode to be of type string, got %T instead", value)
+				}
+				sv.ContributorInsightsMode = types.ContributorInsightsMode(jtv)
+			}
+
+		case "ContributorInsightsStatus":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ContributorInsightsStatus to be of type string, got %T instead", value)
+				}
+				sv.ContributorInsightsStatus = types.ContributorInsightsStatus(jtv)
+			}
+
+		case "IndexName":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected IndexName to be of type string, got %T instead", value)
+				}
+				sv.IndexName = ptr.String(jtv)
+			}
+
+		case "TableName":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected TableName to be of type string, got %T instead", value)
+				}
+				sv.TableName = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentCsvHeaderList(v *[]string, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []string
+	if *v == nil {
+		cv = []string{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col string
+		if value != nil {
+			jtv, ok := value.(string)
+			if !ok {
+				return fmt.Errorf("expected CsvHeader to be of type string, got %T instead", value)
+			}
+			col = jtv
+		}
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentCsvOptions(v **types.CsvOptions, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.CsvOptions
+	if *v == nil {
+		sv = &types.CsvOptions{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "Delimiter":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected CsvDelimiter to be of type string, got %T instead", value)
+				}
+				sv.Delimiter = ptr.String(jtv)
+			}
+
+		case "HeaderList":
+			if err := awsAwsjson10_deserializeDocumentCsvHeaderList(&sv.HeaderList, value); err != nil {
+				return err
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentDeleteRequest(v **types.DeleteRequest, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.DeleteRequest
+	if *v == nil {
+		sv = &types.DeleteRequest{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "Key":
+			if err := awsAwsjson10_deserializeDocumentKey(&sv.Key, value); err != nil {
+				return err
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentDuplicateItemException(v **types.DuplicateItemException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.DuplicateItemException
+	if *v == nil {
+		sv = &types.DuplicateItemException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message", "Message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentEnableKinesisStreamingConfiguration(v **types.EnableKinesisStreamingConfiguration, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.EnableKinesisStreamingConfiguration
+	if *v == nil {
+		sv = &types.EnableKinesisStreamingConfiguration{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "ApproximateCreationDateTimePrecision":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ApproximateCreationDateTimePrecision to be of type string, got %T instead", value)
+				}
+				sv.ApproximateCreationDateTimePrecision = types.ApproximateCreationDateTimePrecision(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentEndpoint(v **types.Endpoint, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
%v", value) + } + + var sv *types.Endpoint + if *v == nil { + sv = &types.Endpoint{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Address": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Address = ptr.String(jtv) + } + + case "CachePeriodInMinutes": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.CachePeriodInMinutes = i64 + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentEndpoints(v *[]types.Endpoint, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Endpoint + if *v == nil { + cv = []types.Endpoint{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Endpoint + destAddr := &col + if err := awsAwsjson10_deserializeDocumentEndpoint(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentExportConflictException(v **types.ExportConflictException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ExportConflictException + if *v == nil { + sv = &types.ExportConflictException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentExportDescription(v **types.ExportDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ExportDescription + if *v == nil { + sv = &types.ExportDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "BilledSizeBytes": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BilledSizeBytes to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.BilledSizeBytes = ptr.Int64(i64) + } + + case "ClientToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ClientToken to be of type string, got %T instead", value) + } + sv.ClientToken = ptr.String(jtv) + } + + case "EndTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.EndTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected ExportEndTime to be a JSON Number, got %T instead", value) + + } + } + + case "ExportArn": + if value != nil { + 
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExportArn to be of type string, got %T instead", value)
+				}
+				sv.ExportArn = ptr.String(jtv)
+			}
+
+		case "ExportFormat":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExportFormat to be of type string, got %T instead", value)
+				}
+				sv.ExportFormat = types.ExportFormat(jtv)
+			}
+
+		case "ExportManifest":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExportManifest to be of type string, got %T instead", value)
+				}
+				sv.ExportManifest = ptr.String(jtv)
+			}
+
+		case "ExportStatus":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExportStatus to be of type string, got %T instead", value)
+				}
+				sv.ExportStatus = types.ExportStatus(jtv)
+			}
+
+		case "ExportTime":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.ExportTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+				default:
+					return fmt.Errorf("expected ExportTime to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		case "ExportType":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExportType to be of type string, got %T instead", value)
+				}
+				sv.ExportType = types.ExportType(jtv)
+			}
+
+		case "FailureCode":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected FailureCode to be of type string, got %T instead", value)
+				}
+				sv.FailureCode = ptr.String(jtv)
+			}
+
+		case "FailureMessage":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected FailureMessage to be of type string, got %T instead", value)
+				}
+				sv.FailureMessage = ptr.String(jtv)
+			}
+
+		case "IncrementalExportSpecification":
+			if err := awsAwsjson10_deserializeDocumentIncrementalExportSpecification(&sv.IncrementalExportSpecification, value); err != nil {
+				return err
+			}
+
+		case "ItemCount":
+			if value != nil {
+				jtv, ok := value.(json.Number)
+				if !ok {
+					return fmt.Errorf("expected ItemCount to be json.Number, got %T instead", value)
+				}
+				i64, err := jtv.Int64()
+				if err != nil {
+					return err
+				}
+				sv.ItemCount = ptr.Int64(i64)
+			}
+
+		case "S3Bucket":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected S3Bucket to be of type string, got %T instead", value)
+				}
+				sv.S3Bucket = ptr.String(jtv)
+			}
+
+		case "S3BucketOwner":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected S3BucketOwner to be of type string, got %T instead", value)
+				}
+				sv.S3BucketOwner = ptr.String(jtv)
+			}
+
+		case "S3Prefix":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected S3Prefix to be of type string, got %T instead", value)
+				}
+				sv.S3Prefix = ptr.String(jtv)
+			}
+
+		case "S3SseAlgorithm":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected S3SseAlgorithm to be of type string, got %T instead", value)
+				}
+				sv.S3SseAlgorithm = types.S3SseAlgorithm(jtv)
+			}
+
+		case "S3SseKmsKeyId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected S3SseKmsKeyId to be of type string, got %T instead", value)
+				}
+				sv.S3SseKmsKeyId = ptr.String(jtv)
+			}
+
+		case "StartTime":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.StartTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+				default:
+					return fmt.Errorf("expected ExportStartTime to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		case "TableArn":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected TableArn to be of type string, got %T instead", value)
+				}
+				sv.TableArn = ptr.String(jtv)
+			}
+
+		case "TableId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected TableId to be of type string, got %T instead", value)
+				}
+				sv.TableId = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentExportNotFoundException(v **types.ExportNotFoundException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ExportNotFoundException
+	if *v == nil {
+		sv = &types.ExportNotFoundException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message", "Message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentExportSummaries(v *[]types.ExportSummary, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.ExportSummary
+	if *v == nil {
+		cv = []types.ExportSummary{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.ExportSummary
+		destAddr := &col
+		if err := awsAwsjson10_deserializeDocumentExportSummary(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentExportSummary(v **types.ExportSummary, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ExportSummary
+	if *v == nil {
+		sv = &types.ExportSummary{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "ExportArn":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExportArn to be of type string, got %T instead", value)
+				}
+				sv.ExportArn = ptr.String(jtv)
+			}
+
+		case "ExportStatus":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExportStatus to be of type string, got %T instead", value)
+				}
+				sv.ExportStatus = types.ExportStatus(jtv)
+			}
+
+		case "ExportType":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExportType to be of type string, got %T instead", value)
+				}
+				sv.ExportType = types.ExportType(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentExpressionAttributeNameMap(v *map[string]string, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var mv map[string]string
+	if *v == nil {
+		mv = map[string]string{}
+	} else {
+		mv = *v
+	}
+
+	for key, value := range shape {
+		var parsedVal string
+		if value != nil {
+			jtv, ok := value.(string)
+			if !ok {
+				return fmt.Errorf("expected AttributeName to be of type string, got %T instead", value)
+			}
+			parsedVal = jtv
+		}
+		mv[key] = parsedVal
+
+	}
+	*v = mv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentFailureException(v **types.FailureException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.FailureException
+	if *v == nil {
+		sv = &types.FailureException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "ExceptionDescription":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExceptionDescription to be of type string, got %T instead", value)
+				}
+				sv.ExceptionDescription = ptr.String(jtv)
+			}
+
+		case "ExceptionName":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExceptionName to be of type string, got %T instead", value)
+				}
+				sv.ExceptionName = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentGlobalSecondaryIndex(v **types.GlobalSecondaryIndex, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.GlobalSecondaryIndex
+	if *v == nil {
+		sv = &types.GlobalSecondaryIndex{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "IndexName":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected IndexName to be of type string, got %T instead", value)
+				}
+				sv.IndexName = ptr.String(jtv)
+			}
+
+		case "KeySchema":
+			if err := awsAwsjson10_deserializeDocumentKeySchema(&sv.KeySchema, value); err != nil {
+				return err
+			}
+
+		case "OnDemandThroughput":
+			if err := awsAwsjson10_deserializeDocumentOnDemandThroughput(&sv.OnDemandThroughput, value); err != nil {
+				return err
+			}
+
+		case "Projection":
+			if err := awsAwsjson10_deserializeDocumentProjection(&sv.Projection, value); err != nil {
+				return err
+			}
+
+		case "ProvisionedThroughput":
+			if err := awsAwsjson10_deserializeDocumentProvisionedThroughput(&sv.ProvisionedThroughput, value); err != nil {
+				return err
+			}
+
+		case "WarmThroughput":
+			if err := awsAwsjson10_deserializeDocumentWarmThroughput(&sv.WarmThroughput, value); err != nil {
+				return err
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentGlobalSecondaryIndexDescription(v **types.GlobalSecondaryIndexDescription, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.GlobalSecondaryIndexDescription
+	if *v == nil {
+		sv = &types.GlobalSecondaryIndexDescription{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "Backfilling":
+			if value != nil {
+				jtv, ok := value.(bool)
+				if !ok {
+					return fmt.Errorf("expected Backfilling to be of type *bool, got %T instead", value)
+				}
+				sv.Backfilling = ptr.Bool(jtv)
+			}
+
+		case "IndexArn":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected String to be of type string, got %T instead", value)
+				}
+				sv.IndexArn = ptr.String(jtv)
+			}
+
+		case "IndexName":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected IndexName to be of type string, got %T instead", value)
+				}
+				sv.IndexName = ptr.String(jtv)
+			}
+
+		case "IndexSizeBytes":
+			if value != nil {
+				jtv, ok := value.(json.Number)
+				if !ok {
+					return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value)
+				}
+				i64, err := jtv.Int64()
+				if err != nil {
+					return err
+				}
+				sv.IndexSizeBytes = ptr.Int64(i64)
+			}
+
+		case "IndexStatus":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected IndexStatus to be of type string, got %T instead", value)
+				}
+				sv.IndexStatus = types.IndexStatus(jtv)
+			}
+
+		case "ItemCount":
+			if value != nil {
+				jtv, ok := value.(json.Number)
+				if !ok {
+					return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value)
+				}
+				i64, err := jtv.Int64()
+				if err != nil {
+					return err
+				}
+				sv.ItemCount = ptr.Int64(i64)
+			}
+
+		case "KeySchema":
+			if err := awsAwsjson10_deserializeDocumentKeySchema(&sv.KeySchema, value); err != nil {
+				return err
+			}
+
+		case "OnDemandThroughput":
+			if err := awsAwsjson10_deserializeDocumentOnDemandThroughput(&sv.OnDemandThroughput, value); err != nil {
+				return err
+			}
+
+		case "Projection":
+			if err := awsAwsjson10_deserializeDocumentProjection(&sv.Projection, value); err != nil {
+				return err
+			}
+
+		case "ProvisionedThroughput":
+			if err := awsAwsjson10_deserializeDocumentProvisionedThroughputDescription(&sv.ProvisionedThroughput, value); err != nil {
+				return err
+			}
+
+		case "WarmThroughput":
+			if err := awsAwsjson10_deserializeDocumentGlobalSecondaryIndexWarmThroughputDescription(&sv.WarmThroughput, value); err != nil {
+				return err
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentGlobalSecondaryIndexDescriptionList(v *[]types.GlobalSecondaryIndexDescription, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.GlobalSecondaryIndexDescription
+	if *v == nil {
+		cv = []types.GlobalSecondaryIndexDescription{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.GlobalSecondaryIndexDescription
+		destAddr := &col
+		if err := awsAwsjson10_deserializeDocumentGlobalSecondaryIndexDescription(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentGlobalSecondaryIndexes(v *[]types.GlobalSecondaryIndexInfo, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.GlobalSecondaryIndexInfo
+	if *v == nil {
+		cv = []types.GlobalSecondaryIndexInfo{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.GlobalSecondaryIndexInfo
+		destAddr := &col
+		if err := awsAwsjson10_deserializeDocumentGlobalSecondaryIndexInfo(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentGlobalSecondaryIndexInfo(v **types.GlobalSecondaryIndexInfo, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.GlobalSecondaryIndexInfo
+	if *v == nil {
+		sv = &types.GlobalSecondaryIndexInfo{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "IndexName":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected IndexName to be of type string, got %T instead", value)
+				}
+				sv.IndexName = ptr.String(jtv)
+			}
+
+		case "KeySchema":
+			if err := awsAwsjson10_deserializeDocumentKeySchema(&sv.KeySchema, value); err != nil {
+				return err
+			}
+
+		case "OnDemandThroughput":
+			if err := awsAwsjson10_deserializeDocumentOnDemandThroughput(&sv.OnDemandThroughput, value); err != nil {
+				return err
+			}
+
+		case "Projection":
+			if err := awsAwsjson10_deserializeDocumentProjection(&sv.Projection, value); err != nil {
+				return err
+			}
+
+		case "ProvisionedThroughput":
+			if err := awsAwsjson10_deserializeDocumentProvisionedThroughput(&sv.ProvisionedThroughput, value); err != nil {
+				return err
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentGlobalSecondaryIndexList(v *[]types.GlobalSecondaryIndex, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.GlobalSecondaryIndex
+	if *v == nil {
+		cv = []types.GlobalSecondaryIndex{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.GlobalSecondaryIndex
+		destAddr := &col
+		if err := awsAwsjson10_deserializeDocumentGlobalSecondaryIndex(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentGlobalSecondaryIndexWarmThroughputDescription(v **types.GlobalSecondaryIndexWarmThroughputDescription, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.GlobalSecondaryIndexWarmThroughputDescription
+	if *v == nil {
+		sv = &types.GlobalSecondaryIndexWarmThroughputDescription{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "ReadUnitsPerSecond":
+			if value != nil {
+				jtv, ok := value.(json.Number)
+				if !ok {
+					return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value)
+				}
+				i64, err := jtv.Int64()
+				if err != nil {
+					return err
+				}
+				sv.ReadUnitsPerSecond = ptr.Int64(i64)
+			}
+
+		case "Status":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected IndexStatus to be of type string, got %T instead", value)
+				}
+				sv.Status = types.IndexStatus(jtv)
+			}
+
case "WriteUnitsPerSecond": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.WriteUnitsPerSecond = ptr.Int64(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentGlobalTable(v **types.GlobalTable, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.GlobalTable + if *v == nil { + sv = &types.GlobalTable{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "GlobalTableName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableName to be of type string, got %T instead", value) + } + sv.GlobalTableName = ptr.String(jtv) + } + + case "ReplicationGroup": + if err := awsAwsjson10_deserializeDocumentReplicaList(&sv.ReplicationGroup, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentGlobalTableAlreadyExistsException(v **types.GlobalTableAlreadyExistsException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.GlobalTableAlreadyExistsException + if *v == nil { + sv = &types.GlobalTableAlreadyExistsException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentGlobalTableDescription(v **types.GlobalTableDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.GlobalTableDescription + if *v == nil { + sv = &types.GlobalTableDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CreationDateTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreationDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "GlobalTableArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GlobalTableArnString to be of type string, got %T instead", value) + } + sv.GlobalTableArn = ptr.String(jtv) + } + + case "GlobalTableName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableName to be of type string, got %T instead", value) + } + sv.GlobalTableName = ptr.String(jtv) + } + + case "GlobalTableStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected 
+				}
+				sv.GlobalTableStatus = types.GlobalTableStatus(jtv)
+			}
+
+		case "ReplicationGroup":
+			if err := awsAwsjson10_deserializeDocumentReplicaDescriptionList(&sv.ReplicationGroup, value); err != nil {
+				return err
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentGlobalTableList(v *[]types.GlobalTable, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.GlobalTable
+	if *v == nil {
+		cv = []types.GlobalTable{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.GlobalTable
+		destAddr := &col
+		if err := awsAwsjson10_deserializeDocumentGlobalTable(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentGlobalTableNotFoundException(v **types.GlobalTableNotFoundException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.GlobalTableNotFoundException
+	if *v == nil {
+		sv = &types.GlobalTableNotFoundException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message", "Message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentGlobalTableWitnessDescription(v **types.GlobalTableWitnessDescription, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.GlobalTableWitnessDescription
+	if *v == nil {
+		sv = &types.GlobalTableWitnessDescription{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "RegionName":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected RegionName to be of type string, got %T instead", value)
+				}
+				sv.RegionName = ptr.String(jtv)
+			}
+
+		case "WitnessStatus":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected WitnessStatus to be of type string, got %T instead", value)
+				}
+				sv.WitnessStatus = types.WitnessStatus(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentGlobalTableWitnessDescriptionList(v *[]types.GlobalTableWitnessDescription, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.GlobalTableWitnessDescription
+	if *v == nil {
+		cv = []types.GlobalTableWitnessDescription{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.GlobalTableWitnessDescription
+		destAddr := &col
+		if err := awsAwsjson10_deserializeDocumentGlobalTableWitnessDescription(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentIdempotentParameterMismatchException(v **types.IdempotentParameterMismatchException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.IdempotentParameterMismatchException
+	if *v == nil {
+		sv = &types.IdempotentParameterMismatchException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message", "Message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentImportConflictException(v **types.ImportConflictException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ImportConflictException
+	if *v == nil {
+		sv = &types.ImportConflictException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message", "Message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentImportNotFoundException(v **types.ImportNotFoundException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ImportNotFoundException
+	if *v == nil {
+		sv = &types.ImportNotFoundException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message", "Message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentImportSummary(v **types.ImportSummary, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ImportSummary
+	if *v == nil {
+		sv = &types.ImportSummary{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "CloudWatchLogGroupArn":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected CloudWatchLogGroupArn to be of type string, got %T instead", value)
+				}
+				sv.CloudWatchLogGroupArn = ptr.String(jtv)
+			}
+
+		case "EndTime":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.EndTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+				default:
+					return fmt.Errorf("expected ImportEndTime to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		case "ImportArn":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ImportArn to be of type string, got %T instead", value)
+				}
+				sv.ImportArn = ptr.String(jtv)
+			}
+
+		case "ImportStatus":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ImportStatus to be of type string, got %T instead", value)
+				}
+				sv.ImportStatus = types.ImportStatus(jtv)
+			}
+
+		case "InputFormat":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected InputFormat to be of type string, got %T instead", value)
+				}
+				sv.InputFormat = types.InputFormat(jtv)
+			}
+
+		case "S3BucketSource":
+			if err := awsAwsjson10_deserializeDocumentS3BucketSource(&sv.S3BucketSource, value); err != nil {
+				return err
+			}
+
+		case "StartTime":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.StartTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+				default:
+					return fmt.Errorf("expected ImportStartTime to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		case "TableArn":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected TableArn to be of type string, got %T instead", value)
+				}
+				sv.TableArn = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentImportSummaryList(v *[]types.ImportSummary, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.ImportSummary
+	if *v == nil {
+		cv = []types.ImportSummary{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.ImportSummary
+		destAddr := &col
+		if err := awsAwsjson10_deserializeDocumentImportSummary(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson10_deserializeDocumentImportTableDescription(v **types.ImportTableDescription, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ImportTableDescription
+	if *v == nil {
+		sv = &types.ImportTableDescription{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "ClientToken":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ClientToken to be of type string, got %T instead", value)
+				}
+				sv.ClientToken = ptr.String(jtv)
+			}
+
+		case "CloudWatchLogGroupArn":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected CloudWatchLogGroupArn to be of type string, got %T instead", value)
+				}
+				sv.CloudWatchLogGroupArn = ptr.String(jtv)
+			}
+
+		case "EndTime":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.EndTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+				default:
+					return fmt.Errorf("expected ImportEndTime to be a JSON Number, got %T instead", value)
instead", value) + + } + } + + case "ErrorCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ErrorCount to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ErrorCount = i64 + } + + case "FailureCode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected FailureCode to be of type string, got %T instead", value) + } + sv.FailureCode = ptr.String(jtv) + } + + case "FailureMessage": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected FailureMessage to be of type string, got %T instead", value) + } + sv.FailureMessage = ptr.String(jtv) + } + + case "ImportArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ImportArn to be of type string, got %T instead", value) + } + sv.ImportArn = ptr.String(jtv) + } + + case "ImportedItemCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ImportedItemCount to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ImportedItemCount = i64 + } + + case "ImportStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ImportStatus to be of type string, got %T instead", value) + } + sv.ImportStatus = types.ImportStatus(jtv) + } + + case "InputCompressionType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected InputCompressionType to be of type string, got %T instead", value) + } + sv.InputCompressionType = types.InputCompressionType(jtv) + } + + case "InputFormat": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected InputFormat to be of type string, got %T instead", value) + } + sv.InputFormat = types.InputFormat(jtv) + } + + case "InputFormatOptions": + if err := awsAwsjson10_deserializeDocumentInputFormatOptions(&sv.InputFormatOptions, value); err != nil { + return err + } + + case "ProcessedItemCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ProcessedItemCount to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ProcessedItemCount = i64 + } + + case "ProcessedSizeBytes": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ProcessedSizeBytes = ptr.Int64(i64) + } + + case "S3BucketSource": + if err := awsAwsjson10_deserializeDocumentS3BucketSource(&sv.S3BucketSource, value); err != nil { + return err + } + + case "StartTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.StartTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected ImportStartTime to be a JSON Number, got %T instead", value) + + } + } + + case "TableArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableArn to be of type string, got %T instead", value) + } + sv.TableArn = ptr.String(jtv) + } + + case "TableCreationParameters": + if err := awsAwsjson10_deserializeDocumentTableCreationParameters(&sv.TableCreationParameters, value); err != nil { + return err + } + + case "TableId": + if value != nil { + jtv, ok := 
value.(string) + if !ok { + return fmt.Errorf("expected TableId to be of type string, got %T instead", value) + } + sv.TableId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentIncrementalExportSpecification(v **types.IncrementalExportSpecification, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.IncrementalExportSpecification + if *v == nil { + sv = &types.IncrementalExportSpecification{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ExportFromTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.ExportFromTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected ExportFromTime to be a JSON Number, got %T instead", value) + + } + } + + case "ExportToTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.ExportToTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected ExportToTime to be a JSON Number, got %T instead", value) + + } + } + + case "ExportViewType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExportViewType to be of type string, got %T instead", value) + } + sv.ExportViewType = types.ExportViewType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentIndexNotFoundException(v **types.IndexNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.IndexNotFoundException + if *v == nil { + sv = &types.IndexNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentInputFormatOptions(v **types.InputFormatOptions, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InputFormatOptions + if *v == nil { + sv = &types.InputFormatOptions{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Csv": + if err := awsAwsjson10_deserializeDocumentCsvOptions(&sv.Csv, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentInternalServerError(v **types.InternalServerError, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return 
fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InternalServerError + if *v == nil { + sv = &types.InternalServerError{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentInvalidEndpointException(v **types.InvalidEndpointException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidEndpointException + if *v == nil { + sv = &types.InvalidEndpointException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentInvalidExportTimeException(v **types.InvalidExportTimeException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidExportTimeException + if *v == nil { + sv = &types.InvalidExportTimeException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentInvalidRestoreTimeException(v **types.InvalidRestoreTimeException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidRestoreTimeException + if *v == nil { + sv = &types.InvalidRestoreTimeException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentItemCollectionKeyAttributeMap(v *map[string]types.AttributeValue, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var mv map[string]types.AttributeValue + if *v == nil { + mv = map[string]types.AttributeValue{} + } else { + mv = *v + } + + for key, value := range shape { + var parsedVal types.AttributeValue + mapVar := parsedVal + if err := 
awsAwsjson10_deserializeDocumentAttributeValue(&mapVar, value); err != nil { + return err + } + parsedVal = mapVar + mv[key] = parsedVal + + } + *v = mv + return nil +} + +func awsAwsjson10_deserializeDocumentItemCollectionMetrics(v **types.ItemCollectionMetrics, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ItemCollectionMetrics + if *v == nil { + sv = &types.ItemCollectionMetrics{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ItemCollectionKey": + if err := awsAwsjson10_deserializeDocumentItemCollectionKeyAttributeMap(&sv.ItemCollectionKey, value); err != nil { + return err + } + + case "SizeEstimateRangeGB": + if err := awsAwsjson10_deserializeDocumentItemCollectionSizeEstimateRange(&sv.SizeEstimateRangeGB, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentItemCollectionMetricsMultiple(v *[]types.ItemCollectionMetrics, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ItemCollectionMetrics + if *v == nil { + cv = []types.ItemCollectionMetrics{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ItemCollectionMetrics + destAddr := &col + if err := awsAwsjson10_deserializeDocumentItemCollectionMetrics(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentItemCollectionMetricsPerTable(v *map[string][]types.ItemCollectionMetrics, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var mv map[string][]types.ItemCollectionMetrics + if *v == nil { + mv = map[string][]types.ItemCollectionMetrics{} + } else { + mv = *v + } + + for key, value := range shape { + var parsedVal []types.ItemCollectionMetrics + mapVar := parsedVal + if err := awsAwsjson10_deserializeDocumentItemCollectionMetricsMultiple(&mapVar, value); err != nil { + return err + } + parsedVal = mapVar + mv[key] = parsedVal + + } + *v = mv + return nil +} + +func awsAwsjson10_deserializeDocumentItemCollectionSizeEstimateRange(v *[]float64, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []float64 + if *v == nil { + cv = []float64{} + } else { + cv = *v + } + + for _, value := range shape { + var col float64 + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + col = f64 + + case string: + var f64 float64 + switch { + case strings.EqualFold(jtv, "NaN"): + f64 = math.NaN() + + case strings.EqualFold(jtv, "Infinity"): + f64 = math.Inf(1) + + case strings.EqualFold(jtv, "-Infinity"): + f64 = math.Inf(-1) + + default: + return fmt.Errorf("unknown JSON number 
value: %s", jtv) + + } + col = f64 + + default: + return fmt.Errorf("expected ItemCollectionSizeEstimateBound to be a JSON Number, got %T instead", value) + + } + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentItemCollectionSizeLimitExceededException(v **types.ItemCollectionSizeLimitExceededException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ItemCollectionSizeLimitExceededException + if *v == nil { + sv = &types.ItemCollectionSizeLimitExceededException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentItemList(v *[]map[string]types.AttributeValue, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []map[string]types.AttributeValue + if *v == nil { + cv = []map[string]types.AttributeValue{} + } else { + cv = *v + } + + for _, value := range shape { + var col map[string]types.AttributeValue + if err := awsAwsjson10_deserializeDocumentAttributeMap(&col, value); err != nil { + return err + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentItemResponse(v **types.ItemResponse, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ItemResponse + if *v == nil { + sv = &types.ItemResponse{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Item": + if err := awsAwsjson10_deserializeDocumentAttributeMap(&sv.Item, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentItemResponseList(v *[]types.ItemResponse, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ItemResponse + if *v == nil { + cv = []types.ItemResponse{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ItemResponse + destAddr := &col + if err := awsAwsjson10_deserializeDocumentItemResponse(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentKey(v *map[string]types.AttributeValue, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var mv map[string]types.AttributeValue + if *v == nil { + mv 
= map[string]types.AttributeValue{} + } else { + mv = *v + } + + for key, value := range shape { + var parsedVal types.AttributeValue + mapVar := parsedVal + if err := awsAwsjson10_deserializeDocumentAttributeValue(&mapVar, value); err != nil { + return err + } + parsedVal = mapVar + mv[key] = parsedVal + + } + *v = mv + return nil +} + +func awsAwsjson10_deserializeDocumentKeyList(v *[]map[string]types.AttributeValue, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []map[string]types.AttributeValue + if *v == nil { + cv = []map[string]types.AttributeValue{} + } else { + cv = *v + } + + for _, value := range shape { + var col map[string]types.AttributeValue + if err := awsAwsjson10_deserializeDocumentKey(&col, value); err != nil { + return err + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentKeysAndAttributes(v **types.KeysAndAttributes, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.KeysAndAttributes + if *v == nil { + sv = &types.KeysAndAttributes{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AttributesToGet": + if err := awsAwsjson10_deserializeDocumentAttributeNameList(&sv.AttributesToGet, value); err != nil { + return err + } + + case "ConsistentRead": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected ConsistentRead to be of type *bool, got %T instead", value) + } + sv.ConsistentRead = ptr.Bool(jtv) + } + + case "ExpressionAttributeNames": + if err := awsAwsjson10_deserializeDocumentExpressionAttributeNameMap(&sv.ExpressionAttributeNames, value); err != nil { + return err + } + + case "Keys": + if err := awsAwsjson10_deserializeDocumentKeyList(&sv.Keys, value); err != nil { + return err + } + + case "ProjectionExpression": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ProjectionExpression to be of type string, got %T instead", value) + } + sv.ProjectionExpression = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentKeySchema(v *[]types.KeySchemaElement, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.KeySchemaElement + if *v == nil { + cv = []types.KeySchemaElement{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.KeySchemaElement + destAddr := &col + if err := awsAwsjson10_deserializeDocumentKeySchemaElement(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentKeySchemaElement(v **types.KeySchemaElement, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv 
*types.KeySchemaElement + if *v == nil { + sv = &types.KeySchemaElement{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AttributeName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeySchemaAttributeName to be of type string, got %T instead", value) + } + sv.AttributeName = ptr.String(jtv) + } + + case "KeyType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyType to be of type string, got %T instead", value) + } + sv.KeyType = types.KeyType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentKinesisDataStreamDestination(v **types.KinesisDataStreamDestination, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.KinesisDataStreamDestination + if *v == nil { + sv = &types.KinesisDataStreamDestination{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ApproximateCreationDateTimePrecision": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ApproximateCreationDateTimePrecision to be of type string, got %T instead", value) + } + sv.ApproximateCreationDateTimePrecision = types.ApproximateCreationDateTimePrecision(jtv) + } + + case "DestinationStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DestinationStatus to be of type string, got %T instead", value) + } + sv.DestinationStatus = types.DestinationStatus(jtv) + } + + case "DestinationStatusDescription": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.DestinationStatusDescription = ptr.String(jtv) + } + + case "StreamArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected StreamArn to be of type string, got %T instead", value) + } + sv.StreamArn = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentKinesisDataStreamDestinations(v *[]types.KinesisDataStreamDestination, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.KinesisDataStreamDestination + if *v == nil { + cv = []types.KinesisDataStreamDestination{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.KinesisDataStreamDestination + destAddr := &col + if err := awsAwsjson10_deserializeDocumentKinesisDataStreamDestination(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentLimitExceededException(v **types.LimitExceededException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.LimitExceededException + if *v == nil { + sv = &types.LimitExceededException{} + } else { + sv = *v + } + + for key, 
value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentListAttributeValue(v *[]types.AttributeValue, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.AttributeValue + if *v == nil { + cv = []types.AttributeValue{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.AttributeValue + if err := awsAwsjson10_deserializeDocumentAttributeValue(&col, value); err != nil { + return err + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentLocalSecondaryIndexDescription(v **types.LocalSecondaryIndexDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.LocalSecondaryIndexDescription + if *v == nil { + sv = &types.LocalSecondaryIndexDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "IndexArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.IndexArn = ptr.String(jtv) + } + + case "IndexName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected IndexName to be of type string, got %T instead", value) + } + sv.IndexName = ptr.String(jtv) + } + + case "IndexSizeBytes": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.IndexSizeBytes = ptr.Int64(i64) + } + + case "ItemCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ItemCount = ptr.Int64(i64) + } + + case "KeySchema": + if err := awsAwsjson10_deserializeDocumentKeySchema(&sv.KeySchema, value); err != nil { + return err + } + + case "Projection": + if err := awsAwsjson10_deserializeDocumentProjection(&sv.Projection, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentLocalSecondaryIndexDescriptionList(v *[]types.LocalSecondaryIndexDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.LocalSecondaryIndexDescription + if *v == nil { + cv = []types.LocalSecondaryIndexDescription{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.LocalSecondaryIndexDescription + destAddr := &col + if err := awsAwsjson10_deserializeDocumentLocalSecondaryIndexDescription(&destAddr, value); err != nil { + return err + 
} + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentLocalSecondaryIndexes(v *[]types.LocalSecondaryIndexInfo, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.LocalSecondaryIndexInfo + if *v == nil { + cv = []types.LocalSecondaryIndexInfo{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.LocalSecondaryIndexInfo + destAddr := &col + if err := awsAwsjson10_deserializeDocumentLocalSecondaryIndexInfo(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentLocalSecondaryIndexInfo(v **types.LocalSecondaryIndexInfo, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.LocalSecondaryIndexInfo + if *v == nil { + sv = &types.LocalSecondaryIndexInfo{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "IndexName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected IndexName to be of type string, got %T instead", value) + } + sv.IndexName = ptr.String(jtv) + } + + case "KeySchema": + if err := awsAwsjson10_deserializeDocumentKeySchema(&sv.KeySchema, value); err != nil { + return err + } + + case "Projection": + if err := awsAwsjson10_deserializeDocumentProjection(&sv.Projection, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentMapAttributeValue(v *map[string]types.AttributeValue, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var mv map[string]types.AttributeValue + if *v == nil { + mv = map[string]types.AttributeValue{} + } else { + mv = *v + } + + for key, value := range shape { + var parsedVal types.AttributeValue + mapVar := parsedVal + if err := awsAwsjson10_deserializeDocumentAttributeValue(&mapVar, value); err != nil { + return err + } + parsedVal = mapVar + mv[key] = parsedVal + + } + *v = mv + return nil +} + +func awsAwsjson10_deserializeDocumentNonKeyAttributeNameList(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NonKeyAttributeName to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentNumberSetAttributeValue(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := 
value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NumberAttributeValue to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentOnDemandThroughput(v **types.OnDemandThroughput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.OnDemandThroughput + if *v == nil { + sv = &types.OnDemandThroughput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "MaxReadRequestUnits": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.MaxReadRequestUnits = ptr.Int64(i64) + } + + case "MaxWriteRequestUnits": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.MaxWriteRequestUnits = ptr.Int64(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentOnDemandThroughputOverride(v **types.OnDemandThroughputOverride, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.OnDemandThroughputOverride + if *v == nil { + sv = &types.OnDemandThroughputOverride{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "MaxReadRequestUnits": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.MaxReadRequestUnits = ptr.Int64(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentPartiQLBatchResponse(v *[]types.BatchStatementResponse, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.BatchStatementResponse + if *v == nil { + cv = []types.BatchStatementResponse{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.BatchStatementResponse + destAddr := &col + if err := awsAwsjson10_deserializeDocumentBatchStatementResponse(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentPointInTimeRecoveryDescription(v **types.PointInTimeRecoveryDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if 
!ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.PointInTimeRecoveryDescription + if *v == nil { + sv = &types.PointInTimeRecoveryDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "EarliestRestorableDateTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.EarliestRestorableDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "LatestRestorableDateTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.LatestRestorableDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "PointInTimeRecoveryStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PointInTimeRecoveryStatus to be of type string, got %T instead", value) + } + sv.PointInTimeRecoveryStatus = types.PointInTimeRecoveryStatus(jtv) + } + + case "RecoveryPeriodInDays": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected RecoveryPeriodInDays to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.RecoveryPeriodInDays = ptr.Int32(int32(i64)) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentPointInTimeRecoveryUnavailableException(v **types.PointInTimeRecoveryUnavailableException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.PointInTimeRecoveryUnavailableException + if *v == nil { + sv = &types.PointInTimeRecoveryUnavailableException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentPolicyNotFoundException(v **types.PolicyNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.PolicyNotFoundException + if *v == nil { + sv = &types.PolicyNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentProjection(v **types.Projection, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := 
value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Projection + if *v == nil { + sv = &types.Projection{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NonKeyAttributes": + if err := awsAwsjson10_deserializeDocumentNonKeyAttributeNameList(&sv.NonKeyAttributes, value); err != nil { + return err + } + + case "ProjectionType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ProjectionType to be of type string, got %T instead", value) + } + sv.ProjectionType = types.ProjectionType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentProvisionedThroughput(v **types.ProvisionedThroughput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ProvisionedThroughput + if *v == nil { + sv = &types.ProvisionedThroughput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ReadCapacityUnits": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ReadCapacityUnits = ptr.Int64(i64) + } + + case "WriteCapacityUnits": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.WriteCapacityUnits = ptr.Int64(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentProvisionedThroughputDescription(v **types.ProvisionedThroughputDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ProvisionedThroughputDescription + if *v == nil { + sv = &types.ProvisionedThroughputDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "LastDecreaseDateTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.LastDecreaseDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "LastIncreaseDateTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.LastIncreaseDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "NumberOfDecreasesToday": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.NumberOfDecreasesToday = ptr.Int64(i64) + } + + case "ReadCapacityUnits": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return 
fmt.Errorf("expected NonNegativeLongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ReadCapacityUnits = ptr.Int64(i64) + } + + case "WriteCapacityUnits": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected NonNegativeLongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.WriteCapacityUnits = ptr.Int64(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentProvisionedThroughputExceededException(v **types.ProvisionedThroughputExceededException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ProvisionedThroughputExceededException + if *v == nil { + sv = &types.ProvisionedThroughputExceededException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + case "ThrottlingReasons": + if err := awsAwsjson10_deserializeDocumentThrottlingReasonList(&sv.ThrottlingReasons, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentProvisionedThroughputOverride(v **types.ProvisionedThroughputOverride, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ProvisionedThroughputOverride + if *v == nil { + sv = &types.ProvisionedThroughputOverride{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ReadCapacityUnits": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ReadCapacityUnits = ptr.Int64(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentPutItemInputAttributeMap(v *map[string]types.AttributeValue, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var mv map[string]types.AttributeValue + if *v == nil { + mv = map[string]types.AttributeValue{} + } else { + mv = *v + } + + for key, value := range shape { + var parsedVal types.AttributeValue + mapVar := parsedVal + if err := awsAwsjson10_deserializeDocumentAttributeValue(&mapVar, value); err != nil { + return err + } + parsedVal = mapVar + mv[key] = parsedVal + + } + *v = mv + return nil +} + +func awsAwsjson10_deserializeDocumentPutRequest(v **types.PutRequest, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return 
fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.PutRequest + if *v == nil { + sv = &types.PutRequest{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Item": + if err := awsAwsjson10_deserializeDocumentPutItemInputAttributeMap(&sv.Item, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentReplica(v **types.Replica, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Replica + if *v == nil { + sv = &types.Replica{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "RegionName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegionName to be of type string, got %T instead", value) + } + sv.RegionName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentReplicaAlreadyExistsException(v **types.ReplicaAlreadyExistsException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ReplicaAlreadyExistsException + if *v == nil { + sv = &types.ReplicaAlreadyExistsException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentReplicaAutoScalingDescription(v **types.ReplicaAutoScalingDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ReplicaAutoScalingDescription + if *v == nil { + sv = &types.ReplicaAutoScalingDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "GlobalSecondaryIndexes": + if err := awsAwsjson10_deserializeDocumentReplicaGlobalSecondaryIndexAutoScalingDescriptionList(&sv.GlobalSecondaryIndexes, value); err != nil { + return err + } + + case "RegionName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegionName to be of type string, got %T instead", value) + } + sv.RegionName = ptr.String(jtv) + } + + case "ReplicaProvisionedReadCapacityAutoScalingSettings": + if err := awsAwsjson10_deserializeDocumentAutoScalingSettingsDescription(&sv.ReplicaProvisionedReadCapacityAutoScalingSettings, value); err != nil { + return err + } + + case "ReplicaProvisionedWriteCapacityAutoScalingSettings": + if err := awsAwsjson10_deserializeDocumentAutoScalingSettingsDescription(&sv.ReplicaProvisionedWriteCapacityAutoScalingSettings, value); err != nil { + return err + } + + case "ReplicaStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ReplicaStatus to be of type string, got %T 
instead", value) + } + sv.ReplicaStatus = types.ReplicaStatus(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentReplicaAutoScalingDescriptionList(v *[]types.ReplicaAutoScalingDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ReplicaAutoScalingDescription + if *v == nil { + cv = []types.ReplicaAutoScalingDescription{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ReplicaAutoScalingDescription + destAddr := &col + if err := awsAwsjson10_deserializeDocumentReplicaAutoScalingDescription(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentReplicaDescription(v **types.ReplicaDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ReplicaDescription + if *v == nil { + sv = &types.ReplicaDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "GlobalSecondaryIndexes": + if err := awsAwsjson10_deserializeDocumentReplicaGlobalSecondaryIndexDescriptionList(&sv.GlobalSecondaryIndexes, value); err != nil { + return err + } + + case "KMSMasterKeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KMSMasterKeyId to be of type string, got %T instead", value) + } + sv.KMSMasterKeyId = ptr.String(jtv) + } + + case "OnDemandThroughputOverride": + if err := awsAwsjson10_deserializeDocumentOnDemandThroughputOverride(&sv.OnDemandThroughputOverride, value); err != nil { + return err + } + + case "ProvisionedThroughputOverride": + if err := awsAwsjson10_deserializeDocumentProvisionedThroughputOverride(&sv.ProvisionedThroughputOverride, value); err != nil { + return err + } + + case "RegionName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegionName to be of type string, got %T instead", value) + } + sv.RegionName = ptr.String(jtv) + } + + case "ReplicaInaccessibleDateTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.ReplicaInaccessibleDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "ReplicaStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ReplicaStatus to be of type string, got %T instead", value) + } + sv.ReplicaStatus = types.ReplicaStatus(jtv) + } + + case "ReplicaStatusDescription": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ReplicaStatusDescription to be of type string, got %T instead", value) + } + sv.ReplicaStatusDescription = ptr.String(jtv) + } + + case "ReplicaStatusPercentProgress": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ReplicaStatusPercentProgress to be of type string, got %T instead", value) + } + sv.ReplicaStatusPercentProgress = ptr.String(jtv) + } + + case 
"ReplicaTableClassSummary": + if err := awsAwsjson10_deserializeDocumentTableClassSummary(&sv.ReplicaTableClassSummary, value); err != nil { + return err + } + + case "WarmThroughput": + if err := awsAwsjson10_deserializeDocumentTableWarmThroughputDescription(&sv.WarmThroughput, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentReplicaDescriptionList(v *[]types.ReplicaDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ReplicaDescription + if *v == nil { + cv = []types.ReplicaDescription{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ReplicaDescription + destAddr := &col + if err := awsAwsjson10_deserializeDocumentReplicaDescription(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentReplicaGlobalSecondaryIndexAutoScalingDescription(v **types.ReplicaGlobalSecondaryIndexAutoScalingDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ReplicaGlobalSecondaryIndexAutoScalingDescription + if *v == nil { + sv = &types.ReplicaGlobalSecondaryIndexAutoScalingDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "IndexName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected IndexName to be of type string, got %T instead", value) + } + sv.IndexName = ptr.String(jtv) + } + + case "IndexStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected IndexStatus to be of type string, got %T instead", value) + } + sv.IndexStatus = types.IndexStatus(jtv) + } + + case "ProvisionedReadCapacityAutoScalingSettings": + if err := awsAwsjson10_deserializeDocumentAutoScalingSettingsDescription(&sv.ProvisionedReadCapacityAutoScalingSettings, value); err != nil { + return err + } + + case "ProvisionedWriteCapacityAutoScalingSettings": + if err := awsAwsjson10_deserializeDocumentAutoScalingSettingsDescription(&sv.ProvisionedWriteCapacityAutoScalingSettings, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentReplicaGlobalSecondaryIndexAutoScalingDescriptionList(v *[]types.ReplicaGlobalSecondaryIndexAutoScalingDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ReplicaGlobalSecondaryIndexAutoScalingDescription + if *v == nil { + cv = []types.ReplicaGlobalSecondaryIndexAutoScalingDescription{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ReplicaGlobalSecondaryIndexAutoScalingDescription + destAddr := &col + if err := awsAwsjson10_deserializeDocumentReplicaGlobalSecondaryIndexAutoScalingDescription(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, 
col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentReplicaGlobalSecondaryIndexDescription(v **types.ReplicaGlobalSecondaryIndexDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ReplicaGlobalSecondaryIndexDescription + if *v == nil { + sv = &types.ReplicaGlobalSecondaryIndexDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "IndexName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected IndexName to be of type string, got %T instead", value) + } + sv.IndexName = ptr.String(jtv) + } + + case "OnDemandThroughputOverride": + if err := awsAwsjson10_deserializeDocumentOnDemandThroughputOverride(&sv.OnDemandThroughputOverride, value); err != nil { + return err + } + + case "ProvisionedThroughputOverride": + if err := awsAwsjson10_deserializeDocumentProvisionedThroughputOverride(&sv.ProvisionedThroughputOverride, value); err != nil { + return err + } + + case "WarmThroughput": + if err := awsAwsjson10_deserializeDocumentGlobalSecondaryIndexWarmThroughputDescription(&sv.WarmThroughput, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentReplicaGlobalSecondaryIndexDescriptionList(v *[]types.ReplicaGlobalSecondaryIndexDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ReplicaGlobalSecondaryIndexDescription + if *v == nil { + cv = []types.ReplicaGlobalSecondaryIndexDescription{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ReplicaGlobalSecondaryIndexDescription + destAddr := &col + if err := awsAwsjson10_deserializeDocumentReplicaGlobalSecondaryIndexDescription(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentReplicaGlobalSecondaryIndexSettingsDescription(v **types.ReplicaGlobalSecondaryIndexSettingsDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ReplicaGlobalSecondaryIndexSettingsDescription + if *v == nil { + sv = &types.ReplicaGlobalSecondaryIndexSettingsDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "IndexName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected IndexName to be of type string, got %T instead", value) + } + sv.IndexName = ptr.String(jtv) + } + + case "IndexStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected IndexStatus to be of type string, got %T instead", value) + } + sv.IndexStatus = types.IndexStatus(jtv) + } + + case "ProvisionedReadCapacityAutoScalingSettings": + if err := awsAwsjson10_deserializeDocumentAutoScalingSettingsDescription(&sv.ProvisionedReadCapacityAutoScalingSettings, value); err != nil { + return err + } + 
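Each of the XxxList deserializers above repeats the same wrapper verbatim: build the element into a local, hand the element deserializer a temporary double pointer (destAddr) so it can allocate, then append the dereferenced result. The generated code expands this per type rather than using generics; the following is a generic condensation of that shape for orientation only, not part of this patch, assuming Go 1.18+ and using an invented toy string deserializer in place of the generated per-type ones.

package main

import "fmt"

// deserializeList condenses the repeated list-deserializer wrapper:
// the element deserializer takes **T so it can allocate, and the
// wrapper adapts that back to the slice's value semantics.
func deserializeList[T any](elem func(**T, interface{}) error, v *[]T, value interface{}) error {
	if v == nil {
		return fmt.Errorf("unexpected nil of type %T", v)
	}
	if value == nil {
		return nil
	}
	shape, ok := value.([]interface{})
	if !ok {
		return fmt.Errorf("unexpected JSON type %v", value)
	}
	cv := *v
	if cv == nil {
		cv = []T{}
	}
	for _, item := range shape {
		var col T
		destAddr := &col // gives the element deserializer a **T to fill or replace
		if err := elem(&destAddr, item); err != nil {
			return err
		}
		// Same as the generated `col = *destAddr; cv = append(cv, col)`.
		cv = append(cv, *destAddr)
	}
	*v = cv
	return nil
}

// deserializeString is a toy element deserializer in the generated style.
func deserializeString(v **string, value interface{}) error {
	if v == nil {
		return fmt.Errorf("unexpected nil of type %T", v)
	}
	if value == nil {
		return nil
	}
	jtv, ok := value.(string)
	if !ok {
		return fmt.Errorf("expected string, got %T", value)
	}
	sv := *v
	if sv == nil {
		sv = new(string)
	}
	*sv = jtv
	*v = sv
	return nil
}

func main() {
	raw := []interface{}{"us-east-1", "eu-west-1"}
	var regions []string
	if err := deserializeList(deserializeString, &regions, raw); err != nil {
		panic(err)
	}
	fmt.Println(regions) // [us-east-1 eu-west-1]
}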
+ case "ProvisionedReadCapacityUnits": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ProvisionedReadCapacityUnits = ptr.Int64(i64) + } + + case "ProvisionedWriteCapacityAutoScalingSettings": + if err := awsAwsjson10_deserializeDocumentAutoScalingSettingsDescription(&sv.ProvisionedWriteCapacityAutoScalingSettings, value); err != nil { + return err + } + + case "ProvisionedWriteCapacityUnits": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ProvisionedWriteCapacityUnits = ptr.Int64(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentReplicaGlobalSecondaryIndexSettingsDescriptionList(v *[]types.ReplicaGlobalSecondaryIndexSettingsDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ReplicaGlobalSecondaryIndexSettingsDescription + if *v == nil { + cv = []types.ReplicaGlobalSecondaryIndexSettingsDescription{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ReplicaGlobalSecondaryIndexSettingsDescription + destAddr := &col + if err := awsAwsjson10_deserializeDocumentReplicaGlobalSecondaryIndexSettingsDescription(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentReplicaList(v *[]types.Replica, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Replica + if *v == nil { + cv = []types.Replica{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Replica + destAddr := &col + if err := awsAwsjson10_deserializeDocumentReplica(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentReplicaNotFoundException(v **types.ReplicaNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ReplicaNotFoundException + if *v == nil { + sv = &types.ReplicaNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentReplicaSettingsDescription(v **types.ReplicaSettingsDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := 
value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ReplicaSettingsDescription + if *v == nil { + sv = &types.ReplicaSettingsDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "RegionName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegionName to be of type string, got %T instead", value) + } + sv.RegionName = ptr.String(jtv) + } + + case "ReplicaBillingModeSummary": + if err := awsAwsjson10_deserializeDocumentBillingModeSummary(&sv.ReplicaBillingModeSummary, value); err != nil { + return err + } + + case "ReplicaGlobalSecondaryIndexSettings": + if err := awsAwsjson10_deserializeDocumentReplicaGlobalSecondaryIndexSettingsDescriptionList(&sv.ReplicaGlobalSecondaryIndexSettings, value); err != nil { + return err + } + + case "ReplicaProvisionedReadCapacityAutoScalingSettings": + if err := awsAwsjson10_deserializeDocumentAutoScalingSettingsDescription(&sv.ReplicaProvisionedReadCapacityAutoScalingSettings, value); err != nil { + return err + } + + case "ReplicaProvisionedReadCapacityUnits": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected NonNegativeLongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ReplicaProvisionedReadCapacityUnits = ptr.Int64(i64) + } + + case "ReplicaProvisionedWriteCapacityAutoScalingSettings": + if err := awsAwsjson10_deserializeDocumentAutoScalingSettingsDescription(&sv.ReplicaProvisionedWriteCapacityAutoScalingSettings, value); err != nil { + return err + } + + case "ReplicaProvisionedWriteCapacityUnits": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected NonNegativeLongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ReplicaProvisionedWriteCapacityUnits = ptr.Int64(i64) + } + + case "ReplicaStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ReplicaStatus to be of type string, got %T instead", value) + } + sv.ReplicaStatus = types.ReplicaStatus(jtv) + } + + case "ReplicaTableClassSummary": + if err := awsAwsjson10_deserializeDocumentTableClassSummary(&sv.ReplicaTableClassSummary, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentReplicaSettingsDescriptionList(v *[]types.ReplicaSettingsDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ReplicaSettingsDescription + if *v == nil { + cv = []types.ReplicaSettingsDescription{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ReplicaSettingsDescription + destAddr := &col + if err := awsAwsjson10_deserializeDocumentReplicaSettingsDescription(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentReplicatedWriteConflictException(v **types.ReplicatedWriteConflictException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) 
+ if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ReplicatedWriteConflictException + if *v == nil { + sv = &types.ReplicatedWriteConflictException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentRequestLimitExceeded(v **types.RequestLimitExceeded, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RequestLimitExceeded + if *v == nil { + sv = &types.RequestLimitExceeded{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + case "ThrottlingReasons": + if err := awsAwsjson10_deserializeDocumentThrottlingReasonList(&sv.ThrottlingReasons, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentResourceInUseException(v **types.ResourceInUseException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ResourceInUseException + if *v == nil { + sv = &types.ResourceInUseException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentResourceNotFoundException(v **types.ResourceNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ResourceNotFoundException + if *v == nil { + sv = &types.ResourceNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentRestoreSummary(v **types.RestoreSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RestoreSummary + if *v == nil { + sv = &types.RestoreSummary{} + } else 
{ + sv = *v + } + + for key, value := range shape { + switch key { + case "RestoreDateTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.RestoreDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "RestoreInProgress": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected RestoreInProgress to be of type *bool, got %T instead", value) + } + sv.RestoreInProgress = ptr.Bool(jtv) + } + + case "SourceBackupArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected BackupArn to be of type string, got %T instead", value) + } + sv.SourceBackupArn = ptr.String(jtv) + } + + case "SourceTableArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableArn to be of type string, got %T instead", value) + } + sv.SourceTableArn = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentS3BucketSource(v **types.S3BucketSource, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.S3BucketSource + if *v == nil { + sv = &types.S3BucketSource{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "S3Bucket": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected S3Bucket to be of type string, got %T instead", value) + } + sv.S3Bucket = ptr.String(jtv) + } + + case "S3BucketOwner": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected S3BucketOwner to be of type string, got %T instead", value) + } + sv.S3BucketOwner = ptr.String(jtv) + } + + case "S3KeyPrefix": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected S3Prefix to be of type string, got %T instead", value) + } + sv.S3KeyPrefix = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentSecondaryIndexesCapacityMap(v *map[string]types.Capacity, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var mv map[string]types.Capacity + if *v == nil { + mv = map[string]types.Capacity{} + } else { + mv = *v + } + + for key, value := range shape { + var parsedVal types.Capacity + mapVar := parsedVal + destAddr := &mapVar + if err := awsAwsjson10_deserializeDocumentCapacity(&destAddr, value); err != nil { + return err + } + parsedVal = *destAddr + mv[key] = parsedVal + + } + *v = mv + return nil +} + +func awsAwsjson10_deserializeDocumentSourceTableDetails(v **types.SourceTableDetails, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SourceTableDetails + if *v == nil { + sv = &types.SourceTableDetails{} + } else { + sv = *v + } + + for key, value 
:= range shape { + switch key { + case "BillingMode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected BillingMode to be of type string, got %T instead", value) + } + sv.BillingMode = types.BillingMode(jtv) + } + + case "ItemCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ItemCount to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ItemCount = ptr.Int64(i64) + } + + case "KeySchema": + if err := awsAwsjson10_deserializeDocumentKeySchema(&sv.KeySchema, value); err != nil { + return err + } + + case "OnDemandThroughput": + if err := awsAwsjson10_deserializeDocumentOnDemandThroughput(&sv.OnDemandThroughput, value); err != nil { + return err + } + + case "ProvisionedThroughput": + if err := awsAwsjson10_deserializeDocumentProvisionedThroughput(&sv.ProvisionedThroughput, value); err != nil { + return err + } + + case "TableArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableArn to be of type string, got %T instead", value) + } + sv.TableArn = ptr.String(jtv) + } + + case "TableCreationDateTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.TableCreationDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected TableCreationDateTime to be a JSON Number, got %T instead", value) + + } + } + + case "TableId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableId to be of type string, got %T instead", value) + } + sv.TableId = ptr.String(jtv) + } + + case "TableName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableName to be of type string, got %T instead", value) + } + sv.TableName = ptr.String(jtv) + } + + case "TableSizeBytes": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.TableSizeBytes = ptr.Int64(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentSourceTableFeatureDetails(v **types.SourceTableFeatureDetails, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SourceTableFeatureDetails + if *v == nil { + sv = &types.SourceTableFeatureDetails{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "GlobalSecondaryIndexes": + if err := awsAwsjson10_deserializeDocumentGlobalSecondaryIndexes(&sv.GlobalSecondaryIndexes, value); err != nil { + return err + } + + case "LocalSecondaryIndexes": + if err := awsAwsjson10_deserializeDocumentLocalSecondaryIndexes(&sv.LocalSecondaryIndexes, value); err != nil { + return err + } + + case "SSEDescription": + if err := awsAwsjson10_deserializeDocumentSSEDescription(&sv.SSEDescription, value); err != nil { + return err + } + + case "StreamDescription": + if err := awsAwsjson10_deserializeDocumentStreamSpecification(&sv.StreamDescription, value); err != nil { + return err + } + + case "TimeToLiveDescription": + if err := 
awsAwsjson10_deserializeDocumentTimeToLiveDescription(&sv.TimeToLiveDescription, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentSSEDescription(v **types.SSEDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SSEDescription + if *v == nil { + sv = &types.SSEDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "InaccessibleEncryptionDateTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.InaccessibleEncryptionDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "KMSMasterKeyArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KMSMasterKeyArn to be of type string, got %T instead", value) + } + sv.KMSMasterKeyArn = ptr.String(jtv) + } + + case "SSEType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SSEType to be of type string, got %T instead", value) + } + sv.SSEType = types.SSEType(jtv) + } + + case "Status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SSEStatus to be of type string, got %T instead", value) + } + sv.Status = types.SSEStatus(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentSSESpecification(v **types.SSESpecification, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SSESpecification + if *v == nil { + sv = &types.SSESpecification{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Enabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected SSEEnabled to be of type *bool, got %T instead", value) + } + sv.Enabled = ptr.Bool(jtv) + } + + case "KMSMasterKeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KMSMasterKeyId to be of type string, got %T instead", value) + } + sv.KMSMasterKeyId = ptr.String(jtv) + } + + case "SSEType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SSEType to be of type string, got %T instead", value) + } + sv.SSEType = types.SSEType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentStreamSpecification(v **types.StreamSpecification, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.StreamSpecification + if *v == nil { + sv = &types.StreamSpecification{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "StreamEnabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return 
fmt.Errorf("expected StreamEnabled to be of type *bool, got %T instead", value) + } + sv.StreamEnabled = ptr.Bool(jtv) + } + + case "StreamViewType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected StreamViewType to be of type string, got %T instead", value) + } + sv.StreamViewType = types.StreamViewType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentStringSetAttributeValue(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected StringAttributeValue to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentTableAlreadyExistsException(v **types.TableAlreadyExistsException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TableAlreadyExistsException + if *v == nil { + sv = &types.TableAlreadyExistsException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentTableAutoScalingDescription(v **types.TableAutoScalingDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TableAutoScalingDescription + if *v == nil { + sv = &types.TableAutoScalingDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Replicas": + if err := awsAwsjson10_deserializeDocumentReplicaAutoScalingDescriptionList(&sv.Replicas, value); err != nil { + return err + } + + case "TableName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableName to be of type string, got %T instead", value) + } + sv.TableName = ptr.String(jtv) + } + + case "TableStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableStatus to be of type string, got %T instead", value) + } + sv.TableStatus = types.TableStatus(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentTableClassSummary(v **types.TableClassSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TableClassSummary + if *v == nil { + sv = &types.TableClassSummary{} + 
} else { + sv = *v + } + + for key, value := range shape { + switch key { + case "LastUpdateDateTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.LastUpdateDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "TableClass": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableClass to be of type string, got %T instead", value) + } + sv.TableClass = types.TableClass(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentTableCreationParameters(v **types.TableCreationParameters, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TableCreationParameters + if *v == nil { + sv = &types.TableCreationParameters{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AttributeDefinitions": + if err := awsAwsjson10_deserializeDocumentAttributeDefinitions(&sv.AttributeDefinitions, value); err != nil { + return err + } + + case "BillingMode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected BillingMode to be of type string, got %T instead", value) + } + sv.BillingMode = types.BillingMode(jtv) + } + + case "GlobalSecondaryIndexes": + if err := awsAwsjson10_deserializeDocumentGlobalSecondaryIndexList(&sv.GlobalSecondaryIndexes, value); err != nil { + return err + } + + case "KeySchema": + if err := awsAwsjson10_deserializeDocumentKeySchema(&sv.KeySchema, value); err != nil { + return err + } + + case "OnDemandThroughput": + if err := awsAwsjson10_deserializeDocumentOnDemandThroughput(&sv.OnDemandThroughput, value); err != nil { + return err + } + + case "ProvisionedThroughput": + if err := awsAwsjson10_deserializeDocumentProvisionedThroughput(&sv.ProvisionedThroughput, value); err != nil { + return err + } + + case "SSESpecification": + if err := awsAwsjson10_deserializeDocumentSSESpecification(&sv.SSESpecification, value); err != nil { + return err + } + + case "TableName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableName to be of type string, got %T instead", value) + } + sv.TableName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentTableDescription(v **types.TableDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TableDescription + if *v == nil { + sv = &types.TableDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ArchivalSummary": + if err := awsAwsjson10_deserializeDocumentArchivalSummary(&sv.ArchivalSummary, value); err != nil { + return err + } + + case "AttributeDefinitions": + if err := awsAwsjson10_deserializeDocumentAttributeDefinitions(&sv.AttributeDefinitions, value); err != nil { + return err + } + + case "BillingModeSummary": + if err := 
awsAwsjson10_deserializeDocumentBillingModeSummary(&sv.BillingModeSummary, value); err != nil { + return err + } + + case "CreationDateTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreationDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "DeletionProtectionEnabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected DeletionProtectionEnabled to be of type *bool, got %T instead", value) + } + sv.DeletionProtectionEnabled = ptr.Bool(jtv) + } + + case "GlobalSecondaryIndexes": + if err := awsAwsjson10_deserializeDocumentGlobalSecondaryIndexDescriptionList(&sv.GlobalSecondaryIndexes, value); err != nil { + return err + } + + case "GlobalTableVersion": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.GlobalTableVersion = ptr.String(jtv) + } + + case "GlobalTableWitnesses": + if err := awsAwsjson10_deserializeDocumentGlobalTableWitnessDescriptionList(&sv.GlobalTableWitnesses, value); err != nil { + return err + } + + case "ItemCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ItemCount = ptr.Int64(i64) + } + + case "KeySchema": + if err := awsAwsjson10_deserializeDocumentKeySchema(&sv.KeySchema, value); err != nil { + return err + } + + case "LatestStreamArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected StreamArn to be of type string, got %T instead", value) + } + sv.LatestStreamArn = ptr.String(jtv) + } + + case "LatestStreamLabel": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.LatestStreamLabel = ptr.String(jtv) + } + + case "LocalSecondaryIndexes": + if err := awsAwsjson10_deserializeDocumentLocalSecondaryIndexDescriptionList(&sv.LocalSecondaryIndexes, value); err != nil { + return err + } + + case "MultiRegionConsistency": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MultiRegionConsistency to be of type string, got %T instead", value) + } + sv.MultiRegionConsistency = types.MultiRegionConsistency(jtv) + } + + case "OnDemandThroughput": + if err := awsAwsjson10_deserializeDocumentOnDemandThroughput(&sv.OnDemandThroughput, value); err != nil { + return err + } + + case "ProvisionedThroughput": + if err := awsAwsjson10_deserializeDocumentProvisionedThroughputDescription(&sv.ProvisionedThroughput, value); err != nil { + return err + } + + case "Replicas": + if err := awsAwsjson10_deserializeDocumentReplicaDescriptionList(&sv.Replicas, value); err != nil { + return err + } + + case "RestoreSummary": + if err := awsAwsjson10_deserializeDocumentRestoreSummary(&sv.RestoreSummary, value); err != nil { + return err + } + + case "SSEDescription": + if err := awsAwsjson10_deserializeDocumentSSEDescription(&sv.SSEDescription, value); err != nil { + return err + } + + case "StreamSpecification": + if err := awsAwsjson10_deserializeDocumentStreamSpecification(&sv.StreamSpecification, value); err != nil { + return err + } + + case "TableArn": + if value != nil { + jtv, ok := value.(string) + if 
!ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.TableArn = ptr.String(jtv) + } + + case "TableClassSummary": + if err := awsAwsjson10_deserializeDocumentTableClassSummary(&sv.TableClassSummary, value); err != nil { + return err + } + + case "TableId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableId to be of type string, got %T instead", value) + } + sv.TableId = ptr.String(jtv) + } + + case "TableName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableName to be of type string, got %T instead", value) + } + sv.TableName = ptr.String(jtv) + } + + case "TableSizeBytes": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.TableSizeBytes = ptr.Int64(i64) + } + + case "TableStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableStatus to be of type string, got %T instead", value) + } + sv.TableStatus = types.TableStatus(jtv) + } + + case "WarmThroughput": + if err := awsAwsjson10_deserializeDocumentTableWarmThroughputDescription(&sv.WarmThroughput, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentTableInUseException(v **types.TableInUseException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TableInUseException + if *v == nil { + sv = &types.TableInUseException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentTableNameList(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableName to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentTableNotFoundException(v **types.TableNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TableNotFoundException + if *v == nil { + sv = &types.TableNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type 
string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentTableWarmThroughputDescription(v **types.TableWarmThroughputDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TableWarmThroughputDescription + if *v == nil { + sv = &types.TableWarmThroughputDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ReadUnitsPerSecond": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ReadUnitsPerSecond = ptr.Int64(i64) + } + + case "Status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableStatus to be of type string, got %T instead", value) + } + sv.Status = types.TableStatus(jtv) + } + + case "WriteUnitsPerSecond": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.WriteUnitsPerSecond = ptr.Int64(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentTag(v **types.Tag, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Tag + if *v == nil { + sv = &types.Tag{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Key": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TagKeyString to be of type string, got %T instead", value) + } + sv.Key = ptr.String(jtv) + } + + case "Value": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TagValueString to be of type string, got %T instead", value) + } + sv.Value = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentTagList(v *[]types.Tag, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Tag + if *v == nil { + cv = []types.Tag{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Tag + destAddr := &col + if err := awsAwsjson10_deserializeDocumentTag(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentThrottlingException(v **types.ThrottlingException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ThrottlingException + if *v == nil { + sv = 
&types.ThrottlingException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AvailabilityErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + case "throttlingReasons": + if err := awsAwsjson10_deserializeDocumentThrottlingReasonList(&sv.ThrottlingReasons, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentThrottlingReason(v **types.ThrottlingReason, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ThrottlingReason + if *v == nil { + sv = &types.ThrottlingReason{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "reason": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Reason to be of type string, got %T instead", value) + } + sv.Reason = ptr.String(jtv) + } + + case "resource": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Resource to be of type string, got %T instead", value) + } + sv.Resource = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentThrottlingReasonList(v *[]types.ThrottlingReason, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ThrottlingReason + if *v == nil { + cv = []types.ThrottlingReason{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ThrottlingReason + destAddr := &col + if err := awsAwsjson10_deserializeDocumentThrottlingReason(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson10_deserializeDocumentTimeToLiveDescription(v **types.TimeToLiveDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TimeToLiveDescription + if *v == nil { + sv = &types.TimeToLiveDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AttributeName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TimeToLiveAttributeName to be of type string, got %T instead", value) + } + sv.AttributeName = ptr.String(jtv) + } + + case "TimeToLiveStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TimeToLiveStatus to be of type string, got %T instead", value) + } + sv.TimeToLiveStatus = types.TimeToLiveStatus(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentTimeToLiveSpecification(v **types.TimeToLiveSpecification, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := 
value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TimeToLiveSpecification + if *v == nil { + sv = &types.TimeToLiveSpecification{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AttributeName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TimeToLiveAttributeName to be of type string, got %T instead", value) + } + sv.AttributeName = ptr.String(jtv) + } + + case "Enabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected TimeToLiveEnabled to be of type *bool, got %T instead", value) + } + sv.Enabled = ptr.Bool(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentTransactionCanceledException(v **types.TransactionCanceledException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TransactionCanceledException + if *v == nil { + sv = &types.TransactionCanceledException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CancellationReasons": + if err := awsAwsjson10_deserializeDocumentCancellationReasonList(&sv.CancellationReasons, value); err != nil { + return err + } + + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentTransactionConflictException(v **types.TransactionConflictException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TransactionConflictException + if *v == nil { + sv = &types.TransactionConflictException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentTransactionInProgressException(v **types.TransactionInProgressException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TransactionInProgressException + if *v == nil { + sv = &types.TransactionInProgressException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentUpdateKinesisStreamingConfiguration(v 
**types.UpdateKinesisStreamingConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UpdateKinesisStreamingConfiguration + if *v == nil { + sv = &types.UpdateKinesisStreamingConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ApproximateCreationDateTimePrecision": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ApproximateCreationDateTimePrecision to be of type string, got %T instead", value) + } + sv.ApproximateCreationDateTimePrecision = types.ApproximateCreationDateTimePrecision(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentWarmThroughput(v **types.WarmThroughput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.WarmThroughput + if *v == nil { + sv = &types.WarmThroughput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ReadUnitsPerSecond": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ReadUnitsPerSecond = ptr.Int64(i64) + } + + case "WriteUnitsPerSecond": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.WriteUnitsPerSecond = ptr.Int64(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentWriteRequest(v **types.WriteRequest, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.WriteRequest + if *v == nil { + sv = &types.WriteRequest{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "DeleteRequest": + if err := awsAwsjson10_deserializeDocumentDeleteRequest(&sv.DeleteRequest, value); err != nil { + return err + } + + case "PutRequest": + if err := awsAwsjson10_deserializeDocumentPutRequest(&sv.PutRequest, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentWriteRequests(v *[]types.WriteRequest, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.WriteRequest + if *v == nil { + cv = []types.WriteRequest{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.WriteRequest + destAddr := &col + if err := awsAwsjson10_deserializeDocumentWriteRequest(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func 
awsAwsjson10_deserializeOpDocumentBatchExecuteStatementOutput(v **BatchExecuteStatementOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *BatchExecuteStatementOutput + if *v == nil { + sv = &BatchExecuteStatementOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ConsumedCapacity": + if err := awsAwsjson10_deserializeDocumentConsumedCapacityMultiple(&sv.ConsumedCapacity, value); err != nil { + return err + } + + case "Responses": + if err := awsAwsjson10_deserializeDocumentPartiQLBatchResponse(&sv.Responses, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentBatchGetItemOutput(v **BatchGetItemOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *BatchGetItemOutput + if *v == nil { + sv = &BatchGetItemOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ConsumedCapacity": + if err := awsAwsjson10_deserializeDocumentConsumedCapacityMultiple(&sv.ConsumedCapacity, value); err != nil { + return err + } + + case "Responses": + if err := awsAwsjson10_deserializeDocumentBatchGetResponseMap(&sv.Responses, value); err != nil { + return err + } + + case "UnprocessedKeys": + if err := awsAwsjson10_deserializeDocumentBatchGetRequestMap(&sv.UnprocessedKeys, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentBatchWriteItemOutput(v **BatchWriteItemOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *BatchWriteItemOutput + if *v == nil { + sv = &BatchWriteItemOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ConsumedCapacity": + if err := awsAwsjson10_deserializeDocumentConsumedCapacityMultiple(&sv.ConsumedCapacity, value); err != nil { + return err + } + + case "ItemCollectionMetrics": + if err := awsAwsjson10_deserializeDocumentItemCollectionMetricsPerTable(&sv.ItemCollectionMetrics, value); err != nil { + return err + } + + case "UnprocessedItems": + if err := awsAwsjson10_deserializeDocumentBatchWriteItemRequestMap(&sv.UnprocessedItems, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentCreateBackupOutput(v **CreateBackupOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateBackupOutput + if *v == nil { + sv = &CreateBackupOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "BackupDetails": + if err := awsAwsjson10_deserializeDocumentBackupDetails(&sv.BackupDetails, value); err != nil 
{ + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentCreateGlobalTableOutput(v **CreateGlobalTableOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateGlobalTableOutput + if *v == nil { + sv = &CreateGlobalTableOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "GlobalTableDescription": + if err := awsAwsjson10_deserializeDocumentGlobalTableDescription(&sv.GlobalTableDescription, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentCreateTableOutput(v **CreateTableOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateTableOutput + if *v == nil { + sv = &CreateTableOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "TableDescription": + if err := awsAwsjson10_deserializeDocumentTableDescription(&sv.TableDescription, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentDeleteBackupOutput(v **DeleteBackupOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteBackupOutput + if *v == nil { + sv = &DeleteBackupOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "BackupDescription": + if err := awsAwsjson10_deserializeDocumentBackupDescription(&sv.BackupDescription, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentDeleteItemOutput(v **DeleteItemOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteItemOutput + if *v == nil { + sv = &DeleteItemOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Attributes": + if err := awsAwsjson10_deserializeDocumentAttributeMap(&sv.Attributes, value); err != nil { + return err + } + + case "ConsumedCapacity": + if err := awsAwsjson10_deserializeDocumentConsumedCapacity(&sv.ConsumedCapacity, value); err != nil { + return err + } + + case "ItemCollectionMetrics": + if err := awsAwsjson10_deserializeDocumentItemCollectionMetrics(&sv.ItemCollectionMetrics, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentDeleteResourcePolicyOutput(v **DeleteResourcePolicyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return 
fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteResourcePolicyOutput + if *v == nil { + sv = &DeleteResourcePolicyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "RevisionId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PolicyRevisionId to be of type string, got %T instead", value) + } + sv.RevisionId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentDeleteTableOutput(v **DeleteTableOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteTableOutput + if *v == nil { + sv = &DeleteTableOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "TableDescription": + if err := awsAwsjson10_deserializeDocumentTableDescription(&sv.TableDescription, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentDescribeBackupOutput(v **DescribeBackupOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeBackupOutput + if *v == nil { + sv = &DescribeBackupOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "BackupDescription": + if err := awsAwsjson10_deserializeDocumentBackupDescription(&sv.BackupDescription, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentDescribeContinuousBackupsOutput(v **DescribeContinuousBackupsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeContinuousBackupsOutput + if *v == nil { + sv = &DescribeContinuousBackupsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ContinuousBackupsDescription": + if err := awsAwsjson10_deserializeDocumentContinuousBackupsDescription(&sv.ContinuousBackupsDescription, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentDescribeContributorInsightsOutput(v **DescribeContributorInsightsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeContributorInsightsOutput + if *v == nil { + sv = &DescribeContributorInsightsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ContributorInsightsMode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ContributorInsightsMode to be of type string, got %T instead", value) + } + sv.ContributorInsightsMode = types.ContributorInsightsMode(jtv) + } + + case 
"ContributorInsightsRuleList": + if err := awsAwsjson10_deserializeDocumentContributorInsightsRuleList(&sv.ContributorInsightsRuleList, value); err != nil { + return err + } + + case "ContributorInsightsStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ContributorInsightsStatus to be of type string, got %T instead", value) + } + sv.ContributorInsightsStatus = types.ContributorInsightsStatus(jtv) + } + + case "FailureException": + if err := awsAwsjson10_deserializeDocumentFailureException(&sv.FailureException, value); err != nil { + return err + } + + case "IndexName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected IndexName to be of type string, got %T instead", value) + } + sv.IndexName = ptr.String(jtv) + } + + case "LastUpdateDateTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.LastUpdateDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected LastUpdateDateTime to be a JSON Number, got %T instead", value) + + } + } + + case "TableName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableName to be of type string, got %T instead", value) + } + sv.TableName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentDescribeEndpointsOutput(v **DescribeEndpointsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeEndpointsOutput + if *v == nil { + sv = &DescribeEndpointsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Endpoints": + if err := awsAwsjson10_deserializeDocumentEndpoints(&sv.Endpoints, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentDescribeExportOutput(v **DescribeExportOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeExportOutput + if *v == nil { + sv = &DescribeExportOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ExportDescription": + if err := awsAwsjson10_deserializeDocumentExportDescription(&sv.ExportDescription, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentDescribeGlobalTableOutput(v **DescribeGlobalTableOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeGlobalTableOutput + if *v == nil { + sv = &DescribeGlobalTableOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "GlobalTableDescription": + if err := awsAwsjson10_deserializeDocumentGlobalTableDescription(&sv.GlobalTableDescription, value); err != nil { + return err + } + + 
default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentDescribeGlobalTableSettingsOutput(v **DescribeGlobalTableSettingsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeGlobalTableSettingsOutput + if *v == nil { + sv = &DescribeGlobalTableSettingsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "GlobalTableName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableName to be of type string, got %T instead", value) + } + sv.GlobalTableName = ptr.String(jtv) + } + + case "ReplicaSettings": + if err := awsAwsjson10_deserializeDocumentReplicaSettingsDescriptionList(&sv.ReplicaSettings, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentDescribeImportOutput(v **DescribeImportOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeImportOutput + if *v == nil { + sv = &DescribeImportOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ImportTableDescription": + if err := awsAwsjson10_deserializeDocumentImportTableDescription(&sv.ImportTableDescription, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentDescribeKinesisStreamingDestinationOutput(v **DescribeKinesisStreamingDestinationOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeKinesisStreamingDestinationOutput + if *v == nil { + sv = &DescribeKinesisStreamingDestinationOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KinesisDataStreamDestinations": + if err := awsAwsjson10_deserializeDocumentKinesisDataStreamDestinations(&sv.KinesisDataStreamDestinations, value); err != nil { + return err + } + + case "TableName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableName to be of type string, got %T instead", value) + } + sv.TableName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentDescribeLimitsOutput(v **DescribeLimitsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeLimitsOutput + if *v == nil { + sv = &DescribeLimitsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AccountMaxReadCapacityUnits": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value) + } + i64, err 
:= jtv.Int64() + if err != nil { + return err + } + sv.AccountMaxReadCapacityUnits = ptr.Int64(i64) + } + + case "AccountMaxWriteCapacityUnits": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.AccountMaxWriteCapacityUnits = ptr.Int64(i64) + } + + case "TableMaxReadCapacityUnits": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.TableMaxReadCapacityUnits = ptr.Int64(i64) + } + + case "TableMaxWriteCapacityUnits": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.TableMaxWriteCapacityUnits = ptr.Int64(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentDescribeTableOutput(v **DescribeTableOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeTableOutput + if *v == nil { + sv = &DescribeTableOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Table": + if err := awsAwsjson10_deserializeDocumentTableDescription(&sv.Table, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentDescribeTableReplicaAutoScalingOutput(v **DescribeTableReplicaAutoScalingOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeTableReplicaAutoScalingOutput + if *v == nil { + sv = &DescribeTableReplicaAutoScalingOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "TableAutoScalingDescription": + if err := awsAwsjson10_deserializeDocumentTableAutoScalingDescription(&sv.TableAutoScalingDescription, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentDescribeTimeToLiveOutput(v **DescribeTimeToLiveOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeTimeToLiveOutput + if *v == nil { + sv = &DescribeTimeToLiveOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "TimeToLiveDescription": + if err := awsAwsjson10_deserializeDocumentTimeToLiveDescription(&sv.TimeToLiveDescription, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentDisableKinesisStreamingDestinationOutput(v **DisableKinesisStreamingDestinationOutput, value interface{}) error { + if v == nil { + return 
fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DisableKinesisStreamingDestinationOutput + if *v == nil { + sv = &DisableKinesisStreamingDestinationOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "DestinationStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DestinationStatus to be of type string, got %T instead", value) + } + sv.DestinationStatus = types.DestinationStatus(jtv) + } + + case "EnableKinesisStreamingConfiguration": + if err := awsAwsjson10_deserializeDocumentEnableKinesisStreamingConfiguration(&sv.EnableKinesisStreamingConfiguration, value); err != nil { + return err + } + + case "StreamArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected StreamArn to be of type string, got %T instead", value) + } + sv.StreamArn = ptr.String(jtv) + } + + case "TableName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableName to be of type string, got %T instead", value) + } + sv.TableName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentEnableKinesisStreamingDestinationOutput(v **EnableKinesisStreamingDestinationOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *EnableKinesisStreamingDestinationOutput + if *v == nil { + sv = &EnableKinesisStreamingDestinationOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "DestinationStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DestinationStatus to be of type string, got %T instead", value) + } + sv.DestinationStatus = types.DestinationStatus(jtv) + } + + case "EnableKinesisStreamingConfiguration": + if err := awsAwsjson10_deserializeDocumentEnableKinesisStreamingConfiguration(&sv.EnableKinesisStreamingConfiguration, value); err != nil { + return err + } + + case "StreamArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected StreamArn to be of type string, got %T instead", value) + } + sv.StreamArn = ptr.String(jtv) + } + + case "TableName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableName to be of type string, got %T instead", value) + } + sv.TableName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentExecuteStatementOutput(v **ExecuteStatementOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ExecuteStatementOutput + if *v == nil { + sv = &ExecuteStatementOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ConsumedCapacity": + if err := awsAwsjson10_deserializeDocumentConsumedCapacity(&sv.ConsumedCapacity, value); err != nil { + return err + } + + case "Items": + if err := 
awsAwsjson10_deserializeDocumentItemList(&sv.Items, value); err != nil { + return err + } + + case "LastEvaluatedKey": + if err := awsAwsjson10_deserializeDocumentKey(&sv.LastEvaluatedKey, value); err != nil { + return err + } + + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PartiQLNextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentExecuteTransactionOutput(v **ExecuteTransactionOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ExecuteTransactionOutput + if *v == nil { + sv = &ExecuteTransactionOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ConsumedCapacity": + if err := awsAwsjson10_deserializeDocumentConsumedCapacityMultiple(&sv.ConsumedCapacity, value); err != nil { + return err + } + + case "Responses": + if err := awsAwsjson10_deserializeDocumentItemResponseList(&sv.Responses, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentExportTableToPointInTimeOutput(v **ExportTableToPointInTimeOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ExportTableToPointInTimeOutput + if *v == nil { + sv = &ExportTableToPointInTimeOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ExportDescription": + if err := awsAwsjson10_deserializeDocumentExportDescription(&sv.ExportDescription, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentGetItemOutput(v **GetItemOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetItemOutput + if *v == nil { + sv = &GetItemOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ConsumedCapacity": + if err := awsAwsjson10_deserializeDocumentConsumedCapacity(&sv.ConsumedCapacity, value); err != nil { + return err + } + + case "Item": + if err := awsAwsjson10_deserializeDocumentAttributeMap(&sv.Item, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentGetResourcePolicyOutput(v **GetResourcePolicyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetResourcePolicyOutput + if *v == nil { + sv = &GetResourcePolicyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Policy": + if value != nil { + jtv, ok := value.(string) + if !ok { + return 
fmt.Errorf("expected ResourcePolicy to be of type string, got %T instead", value) + } + sv.Policy = ptr.String(jtv) + } + + case "RevisionId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PolicyRevisionId to be of type string, got %T instead", value) + } + sv.RevisionId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentImportTableOutput(v **ImportTableOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ImportTableOutput + if *v == nil { + sv = &ImportTableOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ImportTableDescription": + if err := awsAwsjson10_deserializeDocumentImportTableDescription(&sv.ImportTableDescription, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentListBackupsOutput(v **ListBackupsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListBackupsOutput + if *v == nil { + sv = &ListBackupsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "BackupSummaries": + if err := awsAwsjson10_deserializeDocumentBackupSummaries(&sv.BackupSummaries, value); err != nil { + return err + } + + case "LastEvaluatedBackupArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected BackupArn to be of type string, got %T instead", value) + } + sv.LastEvaluatedBackupArn = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentListContributorInsightsOutput(v **ListContributorInsightsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListContributorInsightsOutput + if *v == nil { + sv = &ListContributorInsightsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ContributorInsightsSummaries": + if err := awsAwsjson10_deserializeDocumentContributorInsightsSummaries(&sv.ContributorInsightsSummaries, value); err != nil { + return err + } + + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextTokenString to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentListExportsOutput(v **ListExportsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListExportsOutput + if *v == nil { + sv = &ListExportsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + 
case "ExportSummaries": + if err := awsAwsjson10_deserializeDocumentExportSummaries(&sv.ExportSummaries, value); err != nil { + return err + } + + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExportNextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentListGlobalTablesOutput(v **ListGlobalTablesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListGlobalTablesOutput + if *v == nil { + sv = &ListGlobalTablesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "GlobalTables": + if err := awsAwsjson10_deserializeDocumentGlobalTableList(&sv.GlobalTables, value); err != nil { + return err + } + + case "LastEvaluatedGlobalTableName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableName to be of type string, got %T instead", value) + } + sv.LastEvaluatedGlobalTableName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentListImportsOutput(v **ListImportsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListImportsOutput + if *v == nil { + sv = &ListImportsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ImportSummaryList": + if err := awsAwsjson10_deserializeDocumentImportSummaryList(&sv.ImportSummaryList, value); err != nil { + return err + } + + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ImportNextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentListTablesOutput(v **ListTablesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListTablesOutput + if *v == nil { + sv = &ListTablesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "LastEvaluatedTableName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableName to be of type string, got %T instead", value) + } + sv.LastEvaluatedTableName = ptr.String(jtv) + } + + case "TableNames": + if err := awsAwsjson10_deserializeDocumentTableNameList(&sv.TableNames, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentListTagsOfResourceOutput(v **ListTagsOfResourceOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected 
JSON type %v", value) + } + + var sv *ListTagsOfResourceOutput + if *v == nil { + sv = &ListTagsOfResourceOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextTokenString to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "Tags": + if err := awsAwsjson10_deserializeDocumentTagList(&sv.Tags, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentPutItemOutput(v **PutItemOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *PutItemOutput + if *v == nil { + sv = &PutItemOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Attributes": + if err := awsAwsjson10_deserializeDocumentAttributeMap(&sv.Attributes, value); err != nil { + return err + } + + case "ConsumedCapacity": + if err := awsAwsjson10_deserializeDocumentConsumedCapacity(&sv.ConsumedCapacity, value); err != nil { + return err + } + + case "ItemCollectionMetrics": + if err := awsAwsjson10_deserializeDocumentItemCollectionMetrics(&sv.ItemCollectionMetrics, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentPutResourcePolicyOutput(v **PutResourcePolicyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *PutResourcePolicyOutput + if *v == nil { + sv = &PutResourcePolicyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "RevisionId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PolicyRevisionId to be of type string, got %T instead", value) + } + sv.RevisionId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentQueryOutput(v **QueryOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *QueryOutput + if *v == nil { + sv = &QueryOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ConsumedCapacity": + if err := awsAwsjson10_deserializeDocumentConsumedCapacity(&sv.ConsumedCapacity, value); err != nil { + return err + } + + case "Count": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Count = int32(i64) + } + + case "Items": + if err := awsAwsjson10_deserializeDocumentItemList(&sv.Items, value); err != nil { + return err + } + + case "LastEvaluatedKey": + if err := awsAwsjson10_deserializeDocumentKey(&sv.LastEvaluatedKey, value); err != nil { + return err + } + + case "ScannedCount": + if value != nil { + jtv, ok := 
value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ScannedCount = int32(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentRestoreTableFromBackupOutput(v **RestoreTableFromBackupOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *RestoreTableFromBackupOutput + if *v == nil { + sv = &RestoreTableFromBackupOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "TableDescription": + if err := awsAwsjson10_deserializeDocumentTableDescription(&sv.TableDescription, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentRestoreTableToPointInTimeOutput(v **RestoreTableToPointInTimeOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *RestoreTableToPointInTimeOutput + if *v == nil { + sv = &RestoreTableToPointInTimeOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "TableDescription": + if err := awsAwsjson10_deserializeDocumentTableDescription(&sv.TableDescription, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentScanOutput(v **ScanOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ScanOutput + if *v == nil { + sv = &ScanOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ConsumedCapacity": + if err := awsAwsjson10_deserializeDocumentConsumedCapacity(&sv.ConsumedCapacity, value); err != nil { + return err + } + + case "Count": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Count = int32(i64) + } + + case "Items": + if err := awsAwsjson10_deserializeDocumentItemList(&sv.Items, value); err != nil { + return err + } + + case "LastEvaluatedKey": + if err := awsAwsjson10_deserializeDocumentKey(&sv.LastEvaluatedKey, value); err != nil { + return err + } + + case "ScannedCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ScannedCount = int32(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentTransactGetItemsOutput(v **TransactGetItemsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return 
fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *TransactGetItemsOutput + if *v == nil { + sv = &TransactGetItemsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ConsumedCapacity": + if err := awsAwsjson10_deserializeDocumentConsumedCapacityMultiple(&sv.ConsumedCapacity, value); err != nil { + return err + } + + case "Responses": + if err := awsAwsjson10_deserializeDocumentItemResponseList(&sv.Responses, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentTransactWriteItemsOutput(v **TransactWriteItemsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *TransactWriteItemsOutput + if *v == nil { + sv = &TransactWriteItemsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ConsumedCapacity": + if err := awsAwsjson10_deserializeDocumentConsumedCapacityMultiple(&sv.ConsumedCapacity, value); err != nil { + return err + } + + case "ItemCollectionMetrics": + if err := awsAwsjson10_deserializeDocumentItemCollectionMetricsPerTable(&sv.ItemCollectionMetrics, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentUpdateContinuousBackupsOutput(v **UpdateContinuousBackupsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateContinuousBackupsOutput + if *v == nil { + sv = &UpdateContinuousBackupsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ContinuousBackupsDescription": + if err := awsAwsjson10_deserializeDocumentContinuousBackupsDescription(&sv.ContinuousBackupsDescription, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentUpdateContributorInsightsOutput(v **UpdateContributorInsightsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateContributorInsightsOutput + if *v == nil { + sv = &UpdateContributorInsightsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ContributorInsightsMode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ContributorInsightsMode to be of type string, got %T instead", value) + } + sv.ContributorInsightsMode = types.ContributorInsightsMode(jtv) + } + + case "ContributorInsightsStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ContributorInsightsStatus to be of type string, got %T instead", value) + } + sv.ContributorInsightsStatus = types.ContributorInsightsStatus(jtv) + } + + case "IndexName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected IndexName to be of type string, got %T instead", value) + } + sv.IndexName = 
ptr.String(jtv) + } + + case "TableName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableName to be of type string, got %T instead", value) + } + sv.TableName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentUpdateGlobalTableOutput(v **UpdateGlobalTableOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateGlobalTableOutput + if *v == nil { + sv = &UpdateGlobalTableOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "GlobalTableDescription": + if err := awsAwsjson10_deserializeDocumentGlobalTableDescription(&sv.GlobalTableDescription, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentUpdateGlobalTableSettingsOutput(v **UpdateGlobalTableSettingsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateGlobalTableSettingsOutput + if *v == nil { + sv = &UpdateGlobalTableSettingsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "GlobalTableName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableName to be of type string, got %T instead", value) + } + sv.GlobalTableName = ptr.String(jtv) + } + + case "ReplicaSettings": + if err := awsAwsjson10_deserializeDocumentReplicaSettingsDescriptionList(&sv.ReplicaSettings, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentUpdateItemOutput(v **UpdateItemOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateItemOutput + if *v == nil { + sv = &UpdateItemOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Attributes": + if err := awsAwsjson10_deserializeDocumentAttributeMap(&sv.Attributes, value); err != nil { + return err + } + + case "ConsumedCapacity": + if err := awsAwsjson10_deserializeDocumentConsumedCapacity(&sv.ConsumedCapacity, value); err != nil { + return err + } + + case "ItemCollectionMetrics": + if err := awsAwsjson10_deserializeDocumentItemCollectionMetrics(&sv.ItemCollectionMetrics, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentUpdateKinesisStreamingDestinationOutput(v **UpdateKinesisStreamingDestinationOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateKinesisStreamingDestinationOutput + if *v == nil { + sv = &UpdateKinesisStreamingDestinationOutput{} + } else { + sv = *v 
+ } + + for key, value := range shape { + switch key { + case "DestinationStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DestinationStatus to be of type string, got %T instead", value) + } + sv.DestinationStatus = types.DestinationStatus(jtv) + } + + case "StreamArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected StreamArn to be of type string, got %T instead", value) + } + sv.StreamArn = ptr.String(jtv) + } + + case "TableName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableName to be of type string, got %T instead", value) + } + sv.TableName = ptr.String(jtv) + } + + case "UpdateKinesisStreamingConfiguration": + if err := awsAwsjson10_deserializeDocumentUpdateKinesisStreamingConfiguration(&sv.UpdateKinesisStreamingConfiguration, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentUpdateTableOutput(v **UpdateTableOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateTableOutput + if *v == nil { + sv = &UpdateTableOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "TableDescription": + if err := awsAwsjson10_deserializeDocumentTableDescription(&sv.TableDescription, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentUpdateTableReplicaAutoScalingOutput(v **UpdateTableReplicaAutoScalingOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateTableReplicaAutoScalingOutput + if *v == nil { + sv = &UpdateTableReplicaAutoScalingOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "TableAutoScalingDescription": + if err := awsAwsjson10_deserializeDocumentTableAutoScalingDescription(&sv.TableAutoScalingDescription, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeOpDocumentUpdateTimeToLiveOutput(v **UpdateTimeToLiveOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateTimeToLiveOutput + if *v == nil { + sv = &UpdateTimeToLiveOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "TimeToLiveSpecification": + if err := awsAwsjson10_deserializeDocumentTimeToLiveSpecification(&sv.TimeToLiveSpecification, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type protocolErrorInfo struct { + Type string `json:"__type"` + Message string + Code any // nonstandard for awsjson but some services do present the type here +} + +func getProtocolErrorInfo(decoder *json.Decoder) (protocolErrorInfo, error) { + var errInfo protocolErrorInfo + if err := 
decoder.Decode(&errInfo); err != nil { + if err == io.EOF { + return errInfo, nil + } + return errInfo, err + } + + return errInfo, nil +} + +func resolveProtocolErrorType(headerType string, bodyInfo protocolErrorInfo) (string, bool) { + if len(headerType) != 0 { + return headerType, true + } else if len(bodyInfo.Type) != 0 { + return bodyInfo.Type, true + } else if code, ok := bodyInfo.Code.(string); ok && len(code) != 0 { + return code, true + } + return "", false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/doc.go new file mode 100644 index 0000000000..53f36085a5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/doc.go @@ -0,0 +1,26 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +// Package dynamodb provides the API client, operations, and parameter types for +// Amazon DynamoDB. +// +// # Amazon DynamoDB +// +// Amazon DynamoDB is a fully managed NoSQL database service that provides fast +// and predictable performance with seamless scalability. DynamoDB lets you offload +// the administrative burdens of operating and scaling a distributed database, so +// that you don't have to worry about hardware provisioning, setup and +// configuration, replication, software patching, or cluster scaling. +// +// With DynamoDB, you can create database tables that can store and retrieve any +// amount of data, and serve any level of request traffic. You can scale up or +// scale down your tables' throughput capacity without downtime or performance +// degradation, and use the Amazon Web Services Management Console to monitor +// resource utilization and performance metrics. +// +// DynamoDB automatically spreads the data and traffic for your tables over a +// sufficient number of servers to handle your throughput and storage requirements, +// while maintaining consistent and fast performance. All of your data is stored on +// solid state disks (SSDs) and automatically replicated across multiple +// Availability Zones in an Amazon Web Services Region, providing built-in high +// availability and data durability. +package dynamodb diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/endpoints.go new file mode 100644 index 0000000000..a859c043f0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/endpoints.go @@ -0,0 +1,813 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
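The protocolErrorInfo helpers that close out deserialize.go above resolve the modeled error type with a fixed precedence: the error-type header wins, then the body's __type field, then a string Code field that is nonstandard for awsjson but still emitted by some services. Below is a minimal, self-contained sketch of that precedence; the two declarations are copied verbatim from the diff, while main is illustrative only and not part of the vendored code.

package main

import "fmt"

// Copied from the diff: the loosely typed error envelope decoded from an
// awsjson error response body.
type protocolErrorInfo struct {
	Type    string `json:"__type"`
	Message string
	Code    any // nonstandard for awsjson but some services do present the type here
}

// Copied from the diff: header type first, then body __type, then string Code.
func resolveProtocolErrorType(headerType string, bodyInfo protocolErrorInfo) (string, bool) {
	if len(headerType) != 0 {
		return headerType, true
	} else if len(bodyInfo.Type) != 0 {
		return bodyInfo.Type, true
	} else if code, ok := bodyInfo.Code.(string); ok && len(code) != 0 {
		return code, true
	}
	return "", false
}

func main() {
	// No header present: the body's __type is used.
	t, ok := resolveProtocolErrorType("", protocolErrorInfo{Type: "com.amazonaws.dynamodb#ResourceNotFoundException"})
	fmt.Println(t, ok)

	// Neither header nor __type: fall back to a string Code, if any.
	t, ok = resolveProtocolErrorType("", protocolErrorInfo{Code: "ThrottlingException"})
	fmt.Println(t, ok)
}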
+ +package dynamodb + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + "github.com/aws/aws-sdk-go-v2/internal/endpoints" + "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" + internalendpoints "github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/endpoints" + smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/endpoints/private/rulesfn" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" + "net/url" + "os" + "strings" +) + +// EndpointResolverOptions is the service endpoint resolver options +type EndpointResolverOptions = internalendpoints.Options + +// EndpointResolver interface for resolving service endpoints. +type EndpointResolver interface { + ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) +} + +var _ EndpointResolver = &internalendpoints.Resolver{} + +// NewDefaultEndpointResolver constructs a new service endpoint resolver +func NewDefaultEndpointResolver() *internalendpoints.Resolver { + return internalendpoints.New() +} + +// EndpointResolverFunc is a helper utility that wraps a function so it satisfies +// the EndpointResolver interface. This is useful when you want to add additional +// endpoint resolving logic, or stub out specific endpoints with custom values. +type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) + +func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return fn(region, options) +} + +// EndpointResolverFromURL returns an EndpointResolver configured using the +// provided endpoint url. By default, the resolved endpoint resolver uses the +// client region as signing region, and the endpoint source is set to +// EndpointSourceCustom. You can provide functional options to configure endpoint +// values for the resolved endpoint.
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + nf := (&aws.EndpointNotFoundError{}) + if errors.As(err, &nf) { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false) + return next.HandleSerialize(ctx, in) + } + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "dynamodb" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return w.awsResolver.ResolveEndpoint(ServiceID, region, options) +} + +type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an EndpointResolver that first delegates endpoint resolution to the awsResolver.
+// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error, +// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked +// via its middleware. +// +// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated. +func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver { + var resolver aws.EndpointResolverWithOptions + + if awsResolverWithOptions != nil { + resolver = awsResolverWithOptions + } else if awsResolver != nil { + resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) + } + + return &wrappedEndpointResolver{ + awsResolver: resolver, + } +} + +func finalizeClientEndpointResolverOptions(options *Options) { + options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() + + if len(options.EndpointOptions.ResolvedRegion) == 0 { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(options.Region, fipsInfix) || + strings.Contains(options.Region, fipsPrefix) || + strings.Contains(options.Region, fipsSuffix) { + options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled + } + } + +} + +func resolveEndpointResolverV2(options *Options) { + if options.EndpointResolverV2 == nil { + options.EndpointResolverV2 = NewDefaultEndpointResolverV2() + } +} + +func resolveBaseEndpoint(cfg aws.Config, o *Options) { + if cfg.BaseEndpoint != nil { + o.BaseEndpoint = cfg.BaseEndpoint + } + + _, g := os.LookupEnv("AWS_ENDPOINT_URL") + _, s := os.LookupEnv("AWS_ENDPOINT_URL_DYNAMODB") + + if g && !s { + return + } + + value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "DynamoDB", cfg.ConfigSources) + if found && err == nil { + o.BaseEndpoint = &value + } +} + +func bindRegion(region string) *string { + if region == "" { + return nil + } + return aws.String(endpoints.MapFIPSRegion(region)) +} + +// EndpointParameters provides the parameters that influence how endpoints are +// resolved. +type EndpointParameters struct { + // The AWS region used to dispatch the request. + // + // Parameter is + // required. + // + // AWS::Region + Region *string + + // When true, use the dual-stack endpoint. If the configured endpoint does not + // support dual-stack, dispatching the request MAY return an error. + // + // Defaults to + // false if no value is provided. + // + // AWS::UseDualStack + UseDualStack *bool + + // When true, send this request to the FIPS-compliant regional endpoint. If the + // configured endpoint does not have a FIPS compliant endpoint, dispatching the + // request will return an error. + // + // Defaults to false if no value is + // provided. + // + // AWS::UseFIPS + UseFIPS *bool + + // Override the endpoint used to send this request + // + // Parameter is + // required. + // + // SDK::Endpoint + Endpoint *string + + // The AWS AccountId used for the request. + // + // Parameter is + // required. + // + // AWS::Auth::AccountId + AccountId *string + + // The AccountId Endpoint Mode. + // + // Parameter is + // required. + // + // AWS::Auth::AccountIdEndpointMode + AccountIdEndpointMode *string + + // ResourceArn containing arn of resource + // + // Parameter is required. 
+ ResourceArn *string + + // ResourceArnList containing list of resource arns + // + // Parameter is required. + ResourceArnList []string +} + +// ValidateRequired validates required parameters are set. +func (p EndpointParameters) ValidateRequired() error { + if p.UseDualStack == nil { + return fmt.Errorf("parameter UseDualStack is required") + } + + if p.UseFIPS == nil { + return fmt.Errorf("parameter UseFIPS is required") + } + + return nil +} + +// WithDefaults returns a shallow copy of EndpointParameters with default values +// applied to members where applicable. +func (p EndpointParameters) WithDefaults() EndpointParameters { + if p.UseDualStack == nil { + p.UseDualStack = ptr.Bool(false) + } + + if p.UseFIPS == nil { + p.UseFIPS = ptr.Bool(false) + } + return p +} + +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + +// EndpointResolverV2 provides the interface for resolving service endpoints. +type EndpointResolverV2 interface { + // ResolveEndpoint attempts to resolve the endpoint with the provided options, + // returning the endpoint if found. Otherwise an error is returned. + ResolveEndpoint(ctx context.Context, params EndpointParameters) ( + smithyendpoints.Endpoint, error, + ) +} + +// resolver provides the implementation for resolving endpoints. +type resolver struct{} + +func NewDefaultEndpointResolverV2() EndpointResolverV2 { + return &resolver{} +} + +// ResolveEndpoint attempts to resolve the endpoint with the provided options, +// returning the endpoint if found. Otherwise an error is returned. +func (r *resolver) ResolveEndpoint( + ctx context.Context, params EndpointParameters, +) ( + endpoint smithyendpoints.Endpoint, err error, +) { + params = params.WithDefaults() + if err = params.ValidateRequired(); err != nil { + return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) + } + _UseDualStack := *params.UseDualStack + _UseFIPS := *params.UseFIPS + + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if _UseFIPS == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported") + } + if _UseDualStack == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported") + } + uriString := _Endpoint + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + if exprVal := params.Region; exprVal != nil { + _Region := *exprVal + _ = _Region + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _PartitionResult := *exprVal + _ = _PartitionResult + if _Region == "local" { + if _UseFIPS == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and local endpoint are not supported") + } + if _UseDualStack == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and local endpoint are not supported") + } + uriString := "http://localhost:8000" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + 
smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "dynamodb") + smithyhttp.SetSigV4ASigningName(&sp, "dynamodb") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + if _UseFIPS == true { + if _UseDualStack == true { + if _PartitionResult.SupportsFIPS == true { + if _PartitionResult.SupportsDualStack == true { + if exprVal := params.AccountIdEndpointMode; exprVal != nil { + _AccountIdEndpointMode := *exprVal + _ = _AccountIdEndpointMode + if _AccountIdEndpointMode == "required" { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: AccountIdEndpointMode is required and FIPS is enabled, but FIPS account endpoints are not supported") + } + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://dynamodb-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both") + } + } + if _UseFIPS == true { + if _PartitionResult.SupportsFIPS == true { + if _PartitionResult.Name == "aws-us-gov" { + if exprVal := params.AccountIdEndpointMode; exprVal != nil { + _AccountIdEndpointMode := *exprVal + _ = _AccountIdEndpointMode + if _AccountIdEndpointMode == "required" { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: AccountIdEndpointMode is required and FIPS is enabled, but FIPS account endpoints are not supported") + } + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://dynamodb.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + if exprVal := params.AccountIdEndpointMode; exprVal != nil { + _AccountIdEndpointMode := *exprVal + _ = _AccountIdEndpointMode + if _AccountIdEndpointMode == "required" { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: AccountIdEndpointMode is required and FIPS is enabled, but FIPS account endpoints are not supported") + } + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://dynamodb-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS") + } + if _UseDualStack == true { + if _PartitionResult.SupportsDualStack == true { + if exprVal := params.AccountIdEndpointMode; exprVal != nil { + _AccountIdEndpointMode := *exprVal + _ = _AccountIdEndpointMode + if 
_AccountIdEndpointMode == "required" { + if !(_UseFIPS == true) { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: AccountIdEndpointMode is required and DualStack is enabled, but DualStack account endpoints are not supported") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: AccountIdEndpointMode is required and FIPS is enabled, but FIPS account endpoints are not supported") + } + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://dynamodb.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") + } + if exprVal := params.AccountIdEndpointMode; exprVal != nil { + _AccountIdEndpointMode := *exprVal + _ = _AccountIdEndpointMode + if !(_AccountIdEndpointMode == "disabled") { + if _PartitionResult.Name == "aws" { + if !(_UseFIPS == true) { + if !(_UseDualStack == true) { + if exprVal := params.ResourceArn; exprVal != nil { + _ResourceArn := *exprVal + _ = _ResourceArn + if exprVal := awsrulesfn.ParseARN(_ResourceArn); exprVal != nil { + _ParsedArn := *exprVal + _ = _ParsedArn + if _ParsedArn.Service == "dynamodb" { + if rulesfn.IsValidHostLabel(_ParsedArn.Region, false) { + if _ParsedArn.Region == _Region { + if rulesfn.IsValidHostLabel(_ParsedArn.AccountId, false) { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_ParsedArn.AccountId) + out.WriteString(".ddb.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + } + } + } + } + } + } + } + } + if exprVal := params.AccountIdEndpointMode; exprVal != nil { + _AccountIdEndpointMode := *exprVal + _ = _AccountIdEndpointMode + if !(_AccountIdEndpointMode == "disabled") { + if _PartitionResult.Name == "aws" { + if !(_UseFIPS == true) { + if !(_UseDualStack == true) { + if exprVal := params.ResourceArnList; exprVal != nil { + _ResourceArnList := stringSlice(exprVal) + _ = _ResourceArnList + if exprVal := _ResourceArnList.Get(0); exprVal != nil { + _FirstArn := *exprVal + _ = _FirstArn + if exprVal := awsrulesfn.ParseARN(_FirstArn); exprVal != nil { + _ParsedArn := *exprVal + _ = _ParsedArn + if _ParsedArn.Service == "dynamodb" { + if rulesfn.IsValidHostLabel(_ParsedArn.Region, false) { + if _ParsedArn.Region == _Region { + if rulesfn.IsValidHostLabel(_ParsedArn.AccountId, false) { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_ParsedArn.AccountId) + out.WriteString(".ddb.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + } + } + } + } + } 
+ } + } + } + } + } + if exprVal := params.AccountIdEndpointMode; exprVal != nil { + _AccountIdEndpointMode := *exprVal + _ = _AccountIdEndpointMode + if !(_AccountIdEndpointMode == "disabled") { + if _PartitionResult.Name == "aws" { + if !(_UseFIPS == true) { + if !(_UseDualStack == true) { + if exprVal := params.AccountId; exprVal != nil { + _AccountId := *exprVal + _ = _AccountId + if rulesfn.IsValidHostLabel(_AccountId, false) { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_AccountId) + out.WriteString(".ddb.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Credentials-sourced account ID parameter is invalid") + } + } + } + } + } + } + if exprVal := params.AccountIdEndpointMode; exprVal != nil { + _AccountIdEndpointMode := *exprVal + _ = _AccountIdEndpointMode + if _AccountIdEndpointMode == "required" { + if !(_UseFIPS == true) { + if !(_UseDualStack == true) { + if _PartitionResult.Name == "aws" { + return endpoint, fmt.Errorf("endpoint rule error, %s", "AccountIdEndpointMode is required but no AccountID was provided or able to be loaded") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: AccountIdEndpointMode is required but account endpoints are not supported in this partition") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: AccountIdEndpointMode is required and DualStack is enabled, but DualStack account endpoints are not supported") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: AccountIdEndpointMode is required and FIPS is enabled, but FIPS account endpoints are not supported") + } + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://dynamodb.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") +} + +type endpointParamsBinder interface { + bindEndpointParams(*EndpointParameters) +} + +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { + params := &EndpointParameters{} + + params.Region = bindRegion(options.Region) + params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) + params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) + params.Endpoint = options.BaseEndpoint + params.AccountId = resolveAccountID(getIdentity(ctx), options.AccountIDEndpointMode) + params.AccountIdEndpointMode = aws.String(string(options.AccountIDEndpointMode)) + + if b, ok := input.(endpointParamsBinder); ok { + b.bindEndpointParams(params) + } + + return params +} + +type resolveEndpointV2Middleware struct { + options Options +} + +func (*resolveEndpointV2Middleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "ResolveEndpoint") + defer span.End() + + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleFinalize(ctx, in) + } + + if err := checkAccountID(getIdentity(ctx), m.options.AccountIDEndpointMode); err != nil { + return out, metadata, fmt.Errorf("invalid accountID set: %w", err) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.options.EndpointResolverV2 == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) + endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", + func() (smithyendpoints.Endpoint, error) { + return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + }) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + span.SetProperty("client.call.resolved_endpoint", endpt.URI.String()) + + if endpt.URI.RawPath == "" && req.URL.RawPath != "" { + endpt.URI.RawPath = endpt.URI.Path + } + req.URL.Scheme = endpt.URI.Scheme + req.URL.Host = endpt.URI.Host + req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path) + req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath) + for k := range endpt.Headers { + req.Header.Set(k, endpt.Headers.Get(k)) + } + + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + opts, _ := smithyauth.GetAuthOptions(&endpt.Properties) + for _, o := range opts { + rscheme.SignerProperties.SetAll(&o.SignerProperties) + } + + span.End() + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/generated.json new file mode 100644 index 0000000000..b2746adbd1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/generated.json @@ -0,0 +1,93 @@ +{ + "dependencies": { + "github.com/aws/aws-sdk-go-v2": "v1.4.0", + "github.com/aws/aws-sdk-go-v2/internal/configsources": 
"v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding": "v1.0.5", + "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery": "v0.0.0-00010101000000-000000000000", + "github.com/aws/smithy-go": "v1.4.0" + }, + "files": [ + "api_client.go", + "api_client_test.go", + "api_op_BatchExecuteStatement.go", + "api_op_BatchGetItem.go", + "api_op_BatchWriteItem.go", + "api_op_CreateBackup.go", + "api_op_CreateGlobalTable.go", + "api_op_CreateTable.go", + "api_op_DeleteBackup.go", + "api_op_DeleteItem.go", + "api_op_DeleteResourcePolicy.go", + "api_op_DeleteTable.go", + "api_op_DescribeBackup.go", + "api_op_DescribeContinuousBackups.go", + "api_op_DescribeContributorInsights.go", + "api_op_DescribeEndpoints.go", + "api_op_DescribeExport.go", + "api_op_DescribeGlobalTable.go", + "api_op_DescribeGlobalTableSettings.go", + "api_op_DescribeImport.go", + "api_op_DescribeKinesisStreamingDestination.go", + "api_op_DescribeLimits.go", + "api_op_DescribeTable.go", + "api_op_DescribeTableReplicaAutoScaling.go", + "api_op_DescribeTimeToLive.go", + "api_op_DisableKinesisStreamingDestination.go", + "api_op_EnableKinesisStreamingDestination.go", + "api_op_ExecuteStatement.go", + "api_op_ExecuteTransaction.go", + "api_op_ExportTableToPointInTime.go", + "api_op_GetItem.go", + "api_op_GetResourcePolicy.go", + "api_op_ImportTable.go", + "api_op_ListBackups.go", + "api_op_ListContributorInsights.go", + "api_op_ListExports.go", + "api_op_ListGlobalTables.go", + "api_op_ListImports.go", + "api_op_ListTables.go", + "api_op_ListTagsOfResource.go", + "api_op_PutItem.go", + "api_op_PutResourcePolicy.go", + "api_op_Query.go", + "api_op_RestoreTableFromBackup.go", + "api_op_RestoreTableToPointInTime.go", + "api_op_Scan.go", + "api_op_TagResource.go", + "api_op_TransactGetItems.go", + "api_op_TransactWriteItems.go", + "api_op_UntagResource.go", + "api_op_UpdateContinuousBackups.go", + "api_op_UpdateContributorInsights.go", + "api_op_UpdateGlobalTable.go", + "api_op_UpdateGlobalTableSettings.go", + "api_op_UpdateItem.go", + "api_op_UpdateKinesisStreamingDestination.go", + "api_op_UpdateTable.go", + "api_op_UpdateTableReplicaAutoScaling.go", + "api_op_UpdateTimeToLive.go", + "auth.go", + "deserializers.go", + "doc.go", + "endpoints.go", + "endpoints_config_test.go", + "endpoints_test.go", + "generated.json", + "internal/endpoints/endpoints.go", + "internal/endpoints/endpoints_test.go", + "options.go", + "protocol_test.go", + "serializers.go", + "snapshot_test.go", + "sra_operation_order_test.go", + "types/enums.go", + "types/errors.go", + "types/types.go", + "types/types_exported_test.go", + "validators.go" + ], + "go": "1.22", + "module": "github.com/aws/aws-sdk-go-v2/service/dynamodb", + "unstable": false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/go_module_metadata.go new file mode 100644 index 0000000000..6e1c54415c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. 
+
+package dynamodb
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.50.1"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/handwritten_paginators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/handwritten_paginators.go
new file mode 100644
index 0000000000..399b13e7ad
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/handwritten_paginators.go
@@ -0,0 +1,88 @@
+package dynamodb
+
+import (
+	"context"
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/internal/awsutil"
+	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+)
+
+// BatchGetItemPaginatorOptions is the paginator options for BatchGetItem
+type BatchGetItemPaginatorOptions struct {
+	// Set to true if pagination should stop if the service returns a pagination token
+	// that matches the most recent token provided to the service.
+	StopOnDuplicateToken bool
+}
+
+// BatchGetItemPaginator is a paginator for BatchGetItem
+type BatchGetItemPaginator struct {
+	options      BatchGetItemPaginatorOptions
+	client       BatchGetItemAPIClient
+	params       *BatchGetItemInput
+	firstPage    bool
+	requestItems map[string]types.KeysAndAttributes
+	isTruncated  bool
+}
+
+// BatchGetItemAPIClient is a client that implements the BatchGetItem operation.
+type BatchGetItemAPIClient interface {
+	BatchGetItem(context.Context, *BatchGetItemInput, ...func(*Options)) (*BatchGetItemOutput, error)
+}
+
+// NewBatchGetItemPaginator returns a new BatchGetItemPaginator
+func NewBatchGetItemPaginator(client BatchGetItemAPIClient, params *BatchGetItemInput, optFns ...func(*BatchGetItemPaginatorOptions)) *BatchGetItemPaginator {
+	if params == nil {
+		params = &BatchGetItemInput{}
+	}
+
+	options := BatchGetItemPaginatorOptions{}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &BatchGetItemPaginator{
+		options:      options,
+		client:       client,
+		params:       params,
+		firstPage:    true,
+		requestItems: params.RequestItems,
+	}
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *BatchGetItemPaginator) HasMorePages() bool {
+	return p.firstPage || p.isTruncated
+}
+
+// NextPage retrieves the next BatchGetItem page.
+func (p *BatchGetItemPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*BatchGetItemOutput, error) {
+	if !p.HasMorePages() {
+		return nil, fmt.Errorf("no more pages available")
+	}
+
+	params := *p.params
+	params.RequestItems = p.requestItems
+
+	result, err := p.client.BatchGetItem(ctx, &params, optFns...)
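+	// DynamoDB signals an incomplete batch through UnprocessedKeys rather than
+	// an opaque pagination token; the bookkeeping below feeds those keys back
+	// in as the next page's RequestItems until the map comes back empty.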
+ if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.requestItems + p.isTruncated = len(result.UnprocessedKeys) != 0 + p.requestItems = nil + if p.isTruncated { + p.requestItems = result.UnprocessedKeys + } + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.requestItems != nil && + awsutil.DeepEqual(prevToken, p.requestItems) { + p.isTruncated = false + } + + return result, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/customizations/checksum.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/customizations/checksum.go new file mode 100644 index 0000000000..6b3171e70b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/customizations/checksum.go @@ -0,0 +1,119 @@ +package customizations + +import ( + "context" + "fmt" + "hash" + "hash/crc32" + "io" + "net/http" + "strconv" + + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// AddValidateResponseChecksumOptions provides the options for the +// AddValidateResponseChecksum middleware setup. +type AddValidateResponseChecksumOptions struct { + Disable bool +} + +// AddValidateResponseChecksum adds the Checksum to the middleware +// stack if checksum is not disabled. +func AddValidateResponseChecksum(stack *middleware.Stack, options AddValidateResponseChecksumOptions) error { + if options.Disable { + return nil + } + + return stack.Deserialize.Add(&Checksum{}, middleware.After) +} + +// Checksum provides a middleware to validate the DynamoDB response +// body's integrity by comparing the computed CRC32 checksum with the value +// provided in the HTTP response header. +type Checksum struct{} + +// ID returns the middleware ID. +func (*Checksum) ID() string { return "DynamoDB:ResponseChecksumValidation" } + +// HandleDeserialize implements the Deserialize middleware handle method. +func (m *Checksum) HandleDeserialize( + ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + output middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + output, metadata, err = next.HandleDeserialize(ctx, input) + if err != nil { + return output, metadata, err + } + + resp, ok := output.RawResponse.(*smithyhttp.Response) + if !ok { + return output, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("unknown response type %T", output.RawResponse), + } + } + + expectChecksum, ok, err := getCRC32Checksum(resp.Header) + if err != nil { + return output, metadata, &smithy.DeserializationError{Err: err} + } + + resp.Body = wrapCRC32ChecksumValidate(expectChecksum, resp.Body) + + return output, metadata, err +} + +const crc32ChecksumHeader = "X-Amz-Crc32" + +func getCRC32Checksum(header http.Header) (uint32, bool, error) { + v := header.Get(crc32ChecksumHeader) + if len(v) == 0 { + return 0, false, nil + } + + c, err := strconv.ParseUint(v, 10, 32) + if err != nil { + return 0, false, fmt.Errorf("unable to parse checksum header %v, %w", v, err) + } + + return uint32(c), true, nil +} + +// crc32ChecksumValidate provides wrapping of an io.Reader to validate the CRC32 +// checksum of the bytes read against the expected checksum. +type crc32ChecksumValidate struct { + io.Reader + + closer io.Closer + expect uint32 + hash hash.Hash32 +} + +// wrapCRC32ChecksumValidate constructs a new crc32ChecksumValidate that will +// compute a running CRC32 checksum of the bytes read. 
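+//
+// The wrapper leaves the payload untouched: io.TeeReader copies every byte the
+// deserializer reads into the IEEE CRC32 hash, and Close compares the running
+// sum against the X-Amz-Crc32 header value captured earlier. The equivalent
+// check in isolation (a sketch; assume body holds the raw, still-compressed
+// response bytes and headerValue the parsed header) is:
+//
+//	sum := crc32.ChecksumIEEE(body)
+//	ok := sum == headerValue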
+func wrapCRC32ChecksumValidate(checksum uint32, reader io.ReadCloser) *crc32ChecksumValidate {
+	hash := crc32.NewIEEE()
+	return &crc32ChecksumValidate{
+		expect: checksum,
+		Reader: io.TeeReader(reader, hash),
+		closer: reader,
+		hash:   hash,
+	}
+}
+
+// Close validates the wrapped reader's CRC32 checksum. Returns an error if
+// the read checksum does not match the expected checksum.
+//
+// May also return an error if the wrapped io.ReadCloser's Close returns an
+// error.
+func (c *crc32ChecksumValidate) Close() error {
+	if actual := c.hash.Sum32(); actual != c.expect {
+		c.closer.Close()
+		return fmt.Errorf("response did not match expected checksum, %d, %d", c.expect, actual)
+	}
+
+	return c.closer.Close()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/customizations/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/customizations/doc.go
new file mode 100644
index 0000000000..b023f04bef
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/customizations/doc.go
@@ -0,0 +1,42 @@
+/*
+Package customizations provides customizations for the Amazon DynamoDB API client.
+
+The DynamoDB API client uses two customizations: response checksum validation,
+and manual content-encoding: gzip support.
+
+# Middleware layering
+
+Checksum validation needs to be performed first in the deserialization chain,
+on top of gzip decompression. Since deserialization runs in reverse order
+relative to the other stack steps, it's easier to consider that "after" means
+"before".
+
+	HTTP Response -> Checksum -> gzip decompress -> deserialize
+
+# Response checksum validation
+
+DynamoDB responses can include an X-Amz-Crc32 header with the CRC32 checksum
+value of the response body. If the response body is content-encoding: gzip, the
+checksum is of the gzipped response content.
+
+If the header is present, the SDK should validate that the response payload's
+computed CRC32 checksum matches the value provided in the header. The checksum
+header is based on the original payload returned by the service, which means
+that if the response is gzipped, the checksum is of the gzipped response, not
+the decompressed response bytes.
+
+Customization option:
+
+	DisableValidateResponseChecksum (Enabled by Default)
+
+# Accept encoding gzip
+
+For customization around accept encoding, the DynamoDB client uses the
+middleware defined in service/internal/accept-encoding. Refer to the
+documentation for the `accept-encoding` package for more details.
+
+Customization option:
+
+	EnableAcceptEncodingGzip (Disabled by Default)
+*/
+package customizations
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/endpoints/endpoints.go
new file mode 100644
index 0000000000..09e2b0eed0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/endpoints/endpoints.go
@@ -0,0 +1,596 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package endpoints
+
+import (
+	"github.com/aws/aws-sdk-go-v2/aws"
+	endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2"
+	"github.com/aws/smithy-go/logging"
+	"regexp"
+)
+
+// Options is the endpoint resolver configuration options
+type Options struct {
+	// Logger is a logging implementation that log events should be sent to.
+	Logger logging.Logger
+
+	// LogDeprecated indicates that deprecated endpoints should be logged to the
+	// provided logger.
+ LogDeprecated bool + + // ResolvedRegion is used to override the region to be resolved, rather then the + // using the value passed to the ResolveEndpoint method. This value is used by the + // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative + // name. You must not set this value directly in your application. + ResolvedRegion string + + // DisableHTTPS informs the resolver to return an endpoint that does not use the + // HTTPS scheme. + DisableHTTPS bool + + // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. + UseDualStackEndpoint aws.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. + UseFIPSEndpoint aws.FIPSEndpointState +} + +func (o Options) GetResolvedRegion() string { + return o.ResolvedRegion +} + +func (o Options) GetDisableHTTPS() bool { + return o.DisableHTTPS +} + +func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { + return o.UseDualStackEndpoint +} + +func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { + return o.UseFIPSEndpoint +} + +func transformToSharedOptions(options Options) endpoints.Options { + return endpoints.Options{ + Logger: options.Logger, + LogDeprecated: options.LogDeprecated, + ResolvedRegion: options.ResolvedRegion, + DisableHTTPS: options.DisableHTTPS, + UseDualStackEndpoint: options.UseDualStackEndpoint, + UseFIPSEndpoint: options.UseFIPSEndpoint, + } +} + +// Resolver DynamoDB endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { + return endpoint, &aws.MissingRegionError{} + } + + opt := transformToSharedOptions(options) + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} + +var partitionRegexp = struct { + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsEusc *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsIsoE *regexp.Regexp + AwsIsoF *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsEusc: regexp.MustCompile("^eusc\\-(de)\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), + AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "dynamodb.{region}.api.aws", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "dynamodb-fips.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "dynamodb-fips.{region}.api.aws", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "dynamodb.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + 
SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "af-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-east-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-south-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-4", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-5", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-6", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-7", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ca-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ca-central-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ca-central-1-fips", + }: endpoints.Endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-central-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ca-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ca-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "dynamodb-fips.ca-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ca-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "dynamodb-fips.ca-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "eu-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-central-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "il-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "local", + }: endpoints.Endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "me-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "mx-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "sa-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{}, + 
endpoints.EndpointKey{ + Region: "us-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-east-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-2-fips", + }: endpoints.Endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-2-fips", + }: endpoints.Endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + Deprecated: aws.TrueTernary, + }, + }, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "dynamodb.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "dynamodb-fips.{region}.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "dynamodb-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "dynamodb.{region}.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "cn-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "cn-northwest-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-eusc", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "dynamodb-fips.{region}.amazonaws.eu", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "dynamodb.{region}.amazonaws.eu", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsEusc, + IsRegionalized: true, + }, + { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "dynamodb-fips.{region}.c2s.ic.gov", + Protocols: 
[]string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "dynamodb.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-iso-east-1", + }: endpoints.Endpoint{ + Protocols: []string{"http", "https"}, + }, + endpoints.EndpointKey{ + Region: "us-iso-west-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "dynamodb-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "dynamodb.{region}.sc2s.sgov.gov", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-isob-east-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-iso-e", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "dynamodb-fips.{region}.cloud.adc-e.uk", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "dynamodb.{region}.cloud.adc-e.uk", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoE, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "eu-isoe-west-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-iso-f", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "dynamodb-fips.{region}.csp.hci.ic.gov", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "dynamodb.{region}.csp.hci.ic.gov", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoF, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-isof-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-isof-south-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "dynamodb.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "dynamodb.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "dynamodb-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "dynamodb.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-gov-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "dynamodb-fips.us-gov-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: 
"us-gov-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "dynamodb-fips.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "dynamodb-fips.us-gov-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "dynamodb-fips.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: aws.TrueTernary, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/options.go new file mode 100644 index 0000000000..743c048fb0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/options.go @@ -0,0 +1,257 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" + smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" +) + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // Indicates how aws account ID is applied in endpoint2.0 routing + AccountIDEndpointMode aws.AccountIDEndpointMode + + // The optional application specific identifier appended to the User-Agent header. + AppID string + + // This endpoint will be given as input to an EndpointResolverV2. It is used for + // providing a custom base endpoint that is subject to modifications by the + // processing EndpointResolverV2. + BaseEndpoint *string + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // clients initial default settings. + DefaultsMode aws.DefaultsMode + + // Allows you to disable the client's validation of response integrity using CRC32 + // checksum. Enabled by default. + DisableValidateResponseChecksum bool + + // Allows you to enable the client's support for compressed gzip responses. + // Disabled by default. + EnableAcceptEncodingGzip bool + + // Allows configuring endpoint discovery + EndpointDiscovery EndpointDiscoveryOptions + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + // + // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a + // value for this field will likely prevent you from using any endpoint-related + // service features released after the introduction of EndpointResolverV2 and + // BaseEndpoint. 
+	//
+	// To migrate an EndpointResolver implementation that uses a custom endpoint, set
+	// the client option BaseEndpoint instead.
+	EndpointResolver EndpointResolver
+
+	// Resolves the endpoint used for a particular service operation. This should be
+	// used over the deprecated EndpointResolver.
+	EndpointResolverV2 EndpointResolverV2
+
+	// Signature Version 4 (SigV4) Signer
+	HTTPSignerV4 HTTPSignerV4
+
+	// Provides idempotency token values that will be automatically populated into
+	// idempotent API operations.
+	IdempotencyTokenProvider IdempotencyTokenProvider
+
+	// The logger writer interface to write logging messages to.
+	Logger logging.Logger
+
+	// The client meter provider.
+	MeterProvider metrics.MeterProvider
+
+	// The region to send requests to. (Required)
+	Region string
+
+	// RetryMaxAttempts specifies the maximum number of attempts an API client will
+	// make when calling an operation that fails with a retryable error. A value of 0
+	// is ignored, and will not be used to configure the API client's default retryer
+	// or modify a per-operation call's retry max attempts.
+	//
+	// If specified in an operation call's functional options with a value that is
+	// different than the constructed client's Options, the Client's Retryer will be
+	// wrapped to use the operation's specific RetryMaxAttempts value.
+	RetryMaxAttempts int
+
+	// RetryMode specifies the retry mode the API client will be created with, if
+	// Retryer option is not also specified.
+	//
+	// When creating a new API client this member will only be used if the Retryer
+	// Options member is nil. This value will be ignored if Retryer is not nil.
+	//
+	// Currently does not support per operation call overrides; it may in the future.
+	RetryMode aws.RetryMode
+
+	// Retryer guides how HTTP requests should be retried in case of recoverable
+	// failures. When nil the API client will use a default retryer. The kind of
+	// default retry created by the API client can be changed with the RetryMode
+	// option.
+	Retryer aws.Retryer
+
+	// The RuntimeEnvironment configuration, only populated if the DefaultsMode is set
+	// to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You
+	// should not populate this structure programmatically, or rely on the values here
+	// within your applications.
+	RuntimeEnvironment aws.RuntimeEnvironment
+
+	// The client tracer provider.
+	TracerProvider tracing.TracerProvider
+
+	// The initial DefaultsMode used when the client options were constructed. If the
+	// DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved
+	// value was at that point in time.
+	//
+	// Currently does not support per operation call overrides; it may in the future.
+	resolvedDefaultsMode aws.DefaultsMode
+
+	// The HTTP client to invoke API calls with. Defaults to the client's default HTTP
+	// implementation if nil.
+	HTTPClient HTTPClient
+
+	// Client registry of operation interceptors.
+	Interceptors smithyhttp.InterceptorRegistry
+
+	// The auth scheme resolver which determines how to authenticate for each
+	// operation.
+	AuthSchemeResolver AuthSchemeResolver
+
+	// The list of auth schemes supported by the client.
+	AuthSchemes []smithyhttp.AuthScheme
+
+	// Priority list of preferred auth scheme names (e.g. sigv4a).
+	AuthSchemePreference []string
+}
+
+// Copy creates a clone where the APIOptions list is deep copied.
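+//
+// The clone gets its own backing array for APIOptions: per-operation
+// functional options append middleware to the copy, and a shared array
+// would let one call's additions bleed into later calls on the same client.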
+func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + to.Interceptors = o.Interceptors.Copy() + + return to +} + +func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver { + if schemeID == "aws.auth#sigv4" { + return getSigV4IdentityResolver(o) + } + if schemeID == "smithy.api#noAuth" { + return &smithyauth.AnonymousIdentityResolver{} + } + return nil +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for +// this field will likely prevent you from using any endpoint-related service +// features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// +// To migrate an EndpointResolver implementation that uses a custom endpoint, set +// the client option BaseEndpoint instead. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +// WithEndpointResolverV2 returns a functional option for setting the Client's +// EndpointResolverV2 option. +func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { + return func(o *Options) { + o.EndpointResolverV2 = v + } +} + +func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver { + if o.Credentials != nil { + return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials} + } + return nil +} + +// WithSigV4SigningName applies an override to the authentication workflow to +// use the given signing name for SigV4-authenticated operations. +// +// This is an advanced setting. The value here is FINAL, taking precedence over +// the resolved signing name from both auth scheme resolution and endpoint +// resolution. +func WithSigV4SigningName(name string) func(*Options) { + fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, + ) { + return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in) + } + return func(o *Options) { + o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { + return s.Initialize.Add( + middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn), + middleware.Before, + ) + }) + } +} + +// WithSigV4SigningRegion applies an override to the authentication workflow to +// use the given signing region for SigV4-authenticated operations. +// +// This is an advanced setting. The value here is FINAL, taking precedence over +// the resolved signing region from both auth scheme resolution and endpoint +// resolution. 
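+//
+// A usage sketch with placeholder values (both helpers compose with
+// NewFromConfig, since each returns a func(*Options)):
+//
+//	client := dynamodb.NewFromConfig(cfg,
+//		dynamodb.WithSigV4SigningName("dynamodb"),
+//		dynamodb.WithSigV4SigningRegion("us-west-2"))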
+func WithSigV4SigningRegion(region string) func(*Options) { + fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, + ) { + return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in) + } + return func(o *Options) { + o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { + return s.Initialize.Add( + middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn), + middleware.Before, + ) + }) + } +} + +func ignoreAnonymousAuth(options *Options) { + if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) { + options.Credentials = nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/serializers.go new file mode 100644 index 0000000000..366d529669 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/serializers.go @@ -0,0 +1,7458 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "bytes" + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + smithyjson "github.com/aws/smithy-go/encoding/json" + "github.com/aws/smithy-go/middleware" + smithytime "github.com/aws/smithy-go/time" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "math" + "path" +) + +type awsAwsjson10_serializeOpBatchExecuteStatement struct { +} + +func (*awsAwsjson10_serializeOpBatchExecuteStatement) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpBatchExecuteStatement) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*BatchExecuteStatementInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.BatchExecuteStatement") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentBatchExecuteStatementInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != 
nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpBatchGetItem struct { +} + +func (*awsAwsjson10_serializeOpBatchGetItem) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpBatchGetItem) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*BatchGetItemInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.BatchGetItem") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentBatchGetItemInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpBatchWriteItem struct { +} + +func (*awsAwsjson10_serializeOpBatchWriteItem) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpBatchWriteItem) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*BatchWriteItemInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if 
len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.BatchWriteItem") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentBatchWriteItemInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpCreateBackup struct { +} + +func (*awsAwsjson10_serializeOpCreateBackup) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpCreateBackup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateBackupInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.CreateBackup") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentCreateBackupInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return 
next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpCreateGlobalTable struct { +} + +func (*awsAwsjson10_serializeOpCreateGlobalTable) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpCreateGlobalTable) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateGlobalTableInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.CreateGlobalTable") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentCreateGlobalTableInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpCreateTable struct { +} + +func (*awsAwsjson10_serializeOpCreateTable) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpCreateTable) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateTableInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + 
request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.CreateTable") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentCreateTableInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpDeleteBackup struct { +} + +func (*awsAwsjson10_serializeOpDeleteBackup) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpDeleteBackup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBackupInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DeleteBackup") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentDeleteBackupInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpDeleteItem struct { +} + +func (*awsAwsjson10_serializeOpDeleteItem) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpDeleteItem) HandleSerialize(ctx context.Context, in 
middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteItemInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DeleteItem") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentDeleteItemInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpDeleteResourcePolicy struct { +} + +func (*awsAwsjson10_serializeOpDeleteResourcePolicy) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpDeleteResourcePolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteResourcePolicyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + 
httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DeleteResourcePolicy") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentDeleteResourcePolicyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpDeleteTable struct { +} + +func (*awsAwsjson10_serializeOpDeleteTable) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpDeleteTable) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteTableInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DeleteTable") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentDeleteTableInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpDescribeBackup struct { +} + +func (*awsAwsjson10_serializeOpDescribeBackup) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpDescribeBackup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, 
"client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeBackupInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DescribeBackup") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentDescribeBackupInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpDescribeContinuousBackups struct { +} + +func (*awsAwsjson10_serializeOpDescribeContinuousBackups) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpDescribeContinuousBackups) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeContinuousBackupsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DescribeContinuousBackups") + + jsonEncoder := smithyjson.NewEncoder() + 
if err := awsAwsjson10_serializeOpDocumentDescribeContinuousBackupsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpDescribeContributorInsights struct { +} + +func (*awsAwsjson10_serializeOpDescribeContributorInsights) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpDescribeContributorInsights) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeContributorInsightsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DescribeContributorInsights") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentDescribeContributorInsightsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpDescribeEndpoints struct { +} + +func (*awsAwsjson10_serializeOpDescribeEndpoints) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpDescribeEndpoints) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := 
in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeEndpointsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DescribeEndpoints") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentDescribeEndpointsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpDescribeExport struct { +} + +func (*awsAwsjson10_serializeOpDescribeExport) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpDescribeExport) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeExportInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DescribeExport") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentDescribeExportInput(input, jsonEncoder.Value); err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpDescribeGlobalTable struct { +} + +func (*awsAwsjson10_serializeOpDescribeGlobalTable) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpDescribeGlobalTable) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeGlobalTableInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DescribeGlobalTable") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentDescribeGlobalTableInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpDescribeGlobalTableSettings struct { +} + +func (*awsAwsjson10_serializeOpDescribeGlobalTableSettings) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpDescribeGlobalTableSettings) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := 
in.Parameters.(*DescribeGlobalTableSettingsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DescribeGlobalTableSettings") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentDescribeGlobalTableSettingsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpDescribeImport struct { +} + +func (*awsAwsjson10_serializeOpDescribeImport) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpDescribeImport) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeImportInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DescribeImport") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentDescribeImportInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpDescribeKinesisStreamingDestination struct { +} + +func (*awsAwsjson10_serializeOpDescribeKinesisStreamingDestination) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpDescribeKinesisStreamingDestination) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeKinesisStreamingDestinationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DescribeKinesisStreamingDestination") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentDescribeKinesisStreamingDestinationInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpDescribeLimits struct { +} + +func (*awsAwsjson10_serializeOpDescribeLimits) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpDescribeLimits) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeLimitsInput) + _ = input + if !ok { + return out, metadata, 
&smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DescribeLimits") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentDescribeLimitsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpDescribeTable struct { +} + +func (*awsAwsjson10_serializeOpDescribeTable) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpDescribeTable) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeTableInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DescribeTable") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentDescribeTableInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { 
+ return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpDescribeTableReplicaAutoScaling struct { +} + +func (*awsAwsjson10_serializeOpDescribeTableReplicaAutoScaling) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpDescribeTableReplicaAutoScaling) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeTableReplicaAutoScalingInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DescribeTableReplicaAutoScaling") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentDescribeTableReplicaAutoScalingInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpDescribeTimeToLive struct { +} + +func (*awsAwsjson10_serializeOpDescribeTimeToLive) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpDescribeTimeToLive) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeTimeToLiveInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 
0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DescribeTimeToLive") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentDescribeTimeToLiveInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpDisableKinesisStreamingDestination struct { +} + +func (*awsAwsjson10_serializeOpDisableKinesisStreamingDestination) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpDisableKinesisStreamingDestination) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DisableKinesisStreamingDestinationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DisableKinesisStreamingDestination") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentDisableKinesisStreamingDestinationInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpEnableKinesisStreamingDestination struct { +} + +func (*awsAwsjson10_serializeOpEnableKinesisStreamingDestination) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpEnableKinesisStreamingDestination) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*EnableKinesisStreamingDestinationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.EnableKinesisStreamingDestination") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentEnableKinesisStreamingDestinationInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson10_serializeOpExecuteStatement struct { +} + +func (*awsAwsjson10_serializeOpExecuteStatement) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpExecuteStatement) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ExecuteStatementInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + 
request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.ExecuteStatement")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentExecuteStatementInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpExecuteTransaction struct {
+}
+
+func (*awsAwsjson10_serializeOpExecuteTransaction) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpExecuteTransaction) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ExecuteTransactionInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.ExecuteTransaction")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentExecuteTransactionInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpExportTableToPointInTime struct {
+}
+
+func (*awsAwsjson10_serializeOpExportTableToPointInTime) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpExportTableToPointInTime) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ExportTableToPointInTimeInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.ExportTableToPointInTime")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentExportTableToPointInTimeInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpGetItem struct {
+}
+
+func (*awsAwsjson10_serializeOpGetItem) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpGetItem) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetItemInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.GetItem")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentGetItemInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpGetResourcePolicy struct {
+}
+
+func (*awsAwsjson10_serializeOpGetResourcePolicy) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpGetResourcePolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetResourcePolicyInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.GetResourcePolicy")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentGetResourcePolicyInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpImportTable struct {
+}
+
+func (*awsAwsjson10_serializeOpImportTable) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpImportTable) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ImportTableInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.ImportTable")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentImportTableInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpListBackups struct {
+}
+
+func (*awsAwsjson10_serializeOpListBackups) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpListBackups) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListBackupsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.ListBackups")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentListBackupsInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpListContributorInsights struct {
+}
+
+func (*awsAwsjson10_serializeOpListContributorInsights) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpListContributorInsights) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListContributorInsightsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.ListContributorInsights")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentListContributorInsightsInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpListExports struct {
+}
+
+func (*awsAwsjson10_serializeOpListExports) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpListExports) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListExportsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.ListExports")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentListExportsInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpListGlobalTables struct {
+}
+
+func (*awsAwsjson10_serializeOpListGlobalTables) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpListGlobalTables) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListGlobalTablesInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.ListGlobalTables")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentListGlobalTablesInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpListImports struct {
+}
+
+func (*awsAwsjson10_serializeOpListImports) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpListImports) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListImportsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.ListImports")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentListImportsInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpListTables struct {
+}
+
+func (*awsAwsjson10_serializeOpListTables) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpListTables) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListTablesInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.ListTables")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentListTablesInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpListTagsOfResource struct {
+}
+
+func (*awsAwsjson10_serializeOpListTagsOfResource) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpListTagsOfResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListTagsOfResourceInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.ListTagsOfResource")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentListTagsOfResourceInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpPutItem struct {
+}
+
+func (*awsAwsjson10_serializeOpPutItem) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpPutItem) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutItemInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.PutItem")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentPutItemInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpPutResourcePolicy struct {
+}
+
+func (*awsAwsjson10_serializeOpPutResourcePolicy) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpPutResourcePolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*PutResourcePolicyInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.PutResourcePolicy")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentPutResourcePolicyInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpQuery struct {
+}
+
+func (*awsAwsjson10_serializeOpQuery) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpQuery) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*QueryInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.Query")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentQueryInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpRestoreTableFromBackup struct {
+}
+
+func (*awsAwsjson10_serializeOpRestoreTableFromBackup) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpRestoreTableFromBackup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*RestoreTableFromBackupInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.RestoreTableFromBackup")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentRestoreTableFromBackupInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpRestoreTableToPointInTime struct {
+}
+
+func (*awsAwsjson10_serializeOpRestoreTableToPointInTime) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpRestoreTableToPointInTime) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*RestoreTableToPointInTimeInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.RestoreTableToPointInTime")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentRestoreTableToPointInTimeInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpScan struct {
+}
+
+func (*awsAwsjson10_serializeOpScan) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpScan) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ScanInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.Scan")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentScanInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpTagResource struct {
+}
+
+func (*awsAwsjson10_serializeOpTagResource) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpTagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*TagResourceInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.TagResource")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentTagResourceInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpTransactGetItems struct {
+}
+
+func (*awsAwsjson10_serializeOpTransactGetItems) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpTransactGetItems) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*TransactGetItemsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.TransactGetItems")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentTransactGetItemsInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpTransactWriteItems struct {
+}
+
+func (*awsAwsjson10_serializeOpTransactWriteItems) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpTransactWriteItems) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*TransactWriteItemsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.TransactWriteItems")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentTransactWriteItemsInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpUntagResource struct {
+}
+
+func (*awsAwsjson10_serializeOpUntagResource) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpUntagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*UntagResourceInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.UntagResource")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentUntagResourceInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpUpdateContinuousBackups struct {
+}
+
+func (*awsAwsjson10_serializeOpUpdateContinuousBackups) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpUpdateContinuousBackups) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*UpdateContinuousBackupsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.UpdateContinuousBackups")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentUpdateContinuousBackupsInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpUpdateContributorInsights struct {
+}
+
+func (*awsAwsjson10_serializeOpUpdateContributorInsights) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpUpdateContributorInsights) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*UpdateContributorInsightsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.UpdateContributorInsights")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentUpdateContributorInsightsInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpUpdateGlobalTable struct {
+}
+
+func (*awsAwsjson10_serializeOpUpdateGlobalTable) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpUpdateGlobalTable) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*UpdateGlobalTableInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.UpdateGlobalTable")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentUpdateGlobalTableInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpUpdateGlobalTableSettings struct {
+}
+
+func (*awsAwsjson10_serializeOpUpdateGlobalTableSettings) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpUpdateGlobalTableSettings) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*UpdateGlobalTableSettingsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.UpdateGlobalTableSettings")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentUpdateGlobalTableSettingsInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpUpdateItem struct {
+}
+
+func (*awsAwsjson10_serializeOpUpdateItem) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpUpdateItem) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*UpdateItemInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.UpdateItem")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentUpdateItemInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpUpdateKinesisStreamingDestination struct {
+}
+
+func (*awsAwsjson10_serializeOpUpdateKinesisStreamingDestination) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpUpdateKinesisStreamingDestination) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*UpdateKinesisStreamingDestinationInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.UpdateKinesisStreamingDestination")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentUpdateKinesisStreamingDestinationInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpUpdateTable struct {
+}
+
+func (*awsAwsjson10_serializeOpUpdateTable) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpUpdateTable) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*UpdateTableInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.UpdateTable")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentUpdateTableInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpUpdateTableReplicaAutoScaling struct {
+}
+
+func (*awsAwsjson10_serializeOpUpdateTableReplicaAutoScaling) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpUpdateTableReplicaAutoScaling) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*UpdateTableReplicaAutoScalingInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.UpdateTableReplicaAutoScaling")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentUpdateTableReplicaAutoScalingInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpUpdateTimeToLive struct {
+}
+
+func (*awsAwsjson10_serializeOpUpdateTimeToLive) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpUpdateTimeToLive) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*UpdateTimeToLiveInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.UpdateTimeToLive")
+
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentUpdateTimeToLiveInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+func awsAwsjson10_serializeDocumentAttributeDefinition(v *types.AttributeDefinition, value smithyjson.Value) error {
+	object := value.Object()
+	defer object.Close()
+
+	if v.AttributeName != nil {
+		ok := object.Key("AttributeName")
+		ok.String(*v.AttributeName)
+	}
+
+	if len(v.AttributeType) > 0 {
+		ok := object.Key("AttributeType")
+		ok.String(string(v.AttributeType))
+	}
+
+	return nil
+}
+
+func awsAwsjson10_serializeDocumentAttributeDefinitions(v []types.AttributeDefinition, value smithyjson.Value) error {
+	array := value.Array()
+	defer array.Close()
+
+	for i := range v {
+		av := array.Value()
+		if err := awsAwsjson10_serializeDocumentAttributeDefinition(&v[i], av); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsAwsjson10_serializeDocumentAttributeNameList(v []string, value smithyjson.Value) error {
+	array := value.Array()
+	defer array.Close()
+
+	for i := range v {
+		av := array.Value()
+		av.String(v[i])
+	}
+	return nil
+}
+
+func awsAwsjson10_serializeDocumentAttributeUpdates(v map[string]types.AttributeValueUpdate, value smithyjson.Value) error {
+	object := value.Object()
+	defer object.Close()
+
+	for key := range v {
+		om := object.Key(key)
+		mapVar := v[key]
+		if err := awsAwsjson10_serializeDocumentAttributeValueUpdate(&mapVar, om); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func awsAwsjson10_serializeDocumentAttributeValue(v types.AttributeValue, value smithyjson.Value) error {
+	object := value.Object()
+	defer object.Close()
+
+	switch uv := v.(type) {
+	case *types.AttributeValueMemberB:
+		av := object.Key("B")
+		av.Base64EncodeBytes(uv.Value)
+
+	case *types.AttributeValueMemberBOOL:
+		av := object.Key("BOOL")
+		av.Boolean(uv.Value)
+
+	case *types.AttributeValueMemberBS:
+		av := object.Key("BS")
+		if err := awsAwsjson10_serializeDocumentBinarySetAttributeValue(uv.Value, av); err != nil {
+			return err
+		}
+
+	case *types.AttributeValueMemberL:
+		av := object.Key("L")
+		if err := awsAwsjson10_serializeDocumentListAttributeValue(uv.Value, av); err != nil {
+			return err
+		}
+
+	case *types.AttributeValueMemberM:
+		av := object.Key("M")
+		if err := 
awsAwsjson10_serializeDocumentMapAttributeValue(uv.Value, av); err != nil { + return err + } + + case *types.AttributeValueMemberN: + av := object.Key("N") + av.String(uv.Value) + + case *types.AttributeValueMemberNS: + av := object.Key("NS") + if err := awsAwsjson10_serializeDocumentNumberSetAttributeValue(uv.Value, av); err != nil { + return err + } + + case *types.AttributeValueMemberNULL: + av := object.Key("NULL") + av.Boolean(uv.Value) + + case *types.AttributeValueMemberS: + av := object.Key("S") + av.String(uv.Value) + + case *types.AttributeValueMemberSS: + av := object.Key("SS") + if err := awsAwsjson10_serializeDocumentStringSetAttributeValue(uv.Value, av); err != nil { + return err + } + + default: + return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v) + + } + return nil +} + +func awsAwsjson10_serializeDocumentAttributeValueList(v []types.AttributeValue, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if vv := v[i]; vv == nil { + continue + } + if err := awsAwsjson10_serializeDocumentAttributeValue(v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentAttributeValueUpdate(v *types.AttributeValueUpdate, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.Action) > 0 { + ok := object.Key("Action") + ok.String(string(v.Action)) + } + + if v.Value != nil { + ok := object.Key("Value") + if err := awsAwsjson10_serializeDocumentAttributeValue(v.Value, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeDocumentAutoScalingPolicyUpdate(v *types.AutoScalingPolicyUpdate, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.PolicyName != nil { + ok := object.Key("PolicyName") + ok.String(*v.PolicyName) + } + + if v.TargetTrackingScalingPolicyConfiguration != nil { + ok := object.Key("TargetTrackingScalingPolicyConfiguration") + if err := awsAwsjson10_serializeDocumentAutoScalingTargetTrackingScalingPolicyConfigurationUpdate(v.TargetTrackingScalingPolicyConfiguration, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeDocumentAutoScalingSettingsUpdate(v *types.AutoScalingSettingsUpdate, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AutoScalingDisabled != nil { + ok := object.Key("AutoScalingDisabled") + ok.Boolean(*v.AutoScalingDisabled) + } + + if v.AutoScalingRoleArn != nil { + ok := object.Key("AutoScalingRoleArn") + ok.String(*v.AutoScalingRoleArn) + } + + if v.MaximumUnits != nil { + ok := object.Key("MaximumUnits") + ok.Long(*v.MaximumUnits) + } + + if v.MinimumUnits != nil { + ok := object.Key("MinimumUnits") + ok.Long(*v.MinimumUnits) + } + + if v.ScalingPolicyUpdate != nil { + ok := object.Key("ScalingPolicyUpdate") + if err := awsAwsjson10_serializeDocumentAutoScalingPolicyUpdate(v.ScalingPolicyUpdate, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeDocumentAutoScalingTargetTrackingScalingPolicyConfigurationUpdate(v *types.AutoScalingTargetTrackingScalingPolicyConfigurationUpdate, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DisableScaleIn != nil { + ok := object.Key("DisableScaleIn") + ok.Boolean(*v.DisableScaleIn) + } + + if v.ScaleInCooldown != nil { + ok := object.Key("ScaleInCooldown") + ok.Integer(*v.ScaleInCooldown) + } + + if 
v.ScaleOutCooldown != nil { + ok := object.Key("ScaleOutCooldown") + ok.Integer(*v.ScaleOutCooldown) + } + + if v.TargetValue != nil { + ok := object.Key("TargetValue") + switch { + case math.IsNaN(*v.TargetValue): + ok.String("NaN") + + case math.IsInf(*v.TargetValue, 1): + ok.String("Infinity") + + case math.IsInf(*v.TargetValue, -1): + ok.String("-Infinity") + + default: + ok.Double(*v.TargetValue) + + } + } + + return nil +} + +func awsAwsjson10_serializeDocumentBatchGetRequestMap(v map[string]types.KeysAndAttributes, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + for key := range v { + om := object.Key(key) + mapVar := v[key] + if err := awsAwsjson10_serializeDocumentKeysAndAttributes(&mapVar, om); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentBatchStatementRequest(v *types.BatchStatementRequest, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ConsistentRead != nil { + ok := object.Key("ConsistentRead") + ok.Boolean(*v.ConsistentRead) + } + + if v.Parameters != nil { + ok := object.Key("Parameters") + if err := awsAwsjson10_serializeDocumentPreparedStatementParameters(v.Parameters, ok); err != nil { + return err + } + } + + if len(v.ReturnValuesOnConditionCheckFailure) > 0 { + ok := object.Key("ReturnValuesOnConditionCheckFailure") + ok.String(string(v.ReturnValuesOnConditionCheckFailure)) + } + + if v.Statement != nil { + ok := object.Key("Statement") + ok.String(*v.Statement) + } + + return nil +} + +func awsAwsjson10_serializeDocumentBatchWriteItemRequestMap(v map[string][]types.WriteRequest, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + for key := range v { + om := object.Key(key) + if vv := v[key]; vv == nil { + continue + } + if err := awsAwsjson10_serializeDocumentWriteRequests(v[key], om); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentBinarySetAttributeValue(v [][]byte, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if vv := v[i]; vv == nil { + continue + } + av.Base64EncodeBytes(v[i]) + } + return nil +} + +func awsAwsjson10_serializeDocumentCondition(v *types.Condition, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AttributeValueList != nil { + ok := object.Key("AttributeValueList") + if err := awsAwsjson10_serializeDocumentAttributeValueList(v.AttributeValueList, ok); err != nil { + return err + } + } + + if len(v.ComparisonOperator) > 0 { + ok := object.Key("ComparisonOperator") + ok.String(string(v.ComparisonOperator)) + } + + return nil +} + +func awsAwsjson10_serializeDocumentConditionCheck(v *types.ConditionCheck, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ConditionExpression != nil { + ok := object.Key("ConditionExpression") + ok.String(*v.ConditionExpression) + } + + if v.ExpressionAttributeNames != nil { + ok := object.Key("ExpressionAttributeNames") + if err := awsAwsjson10_serializeDocumentExpressionAttributeNameMap(v.ExpressionAttributeNames, ok); err != nil { + return err + } + } + + if v.ExpressionAttributeValues != nil { + ok := object.Key("ExpressionAttributeValues") + if err := awsAwsjson10_serializeDocumentExpressionAttributeValueMap(v.ExpressionAttributeValues, ok); err != nil { + return err + } + } + + if v.Key != nil { + ok := object.Key("Key") + if err := 
awsAwsjson10_serializeDocumentKey(v.Key, ok); err != nil { + return err + } + } + + if len(v.ReturnValuesOnConditionCheckFailure) > 0 { + ok := object.Key("ReturnValuesOnConditionCheckFailure") + ok.String(string(v.ReturnValuesOnConditionCheckFailure)) + } + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + return nil +} + +func awsAwsjson10_serializeDocumentCreateGlobalSecondaryIndexAction(v *types.CreateGlobalSecondaryIndexAction, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.IndexName != nil { + ok := object.Key("IndexName") + ok.String(*v.IndexName) + } + + if v.KeySchema != nil { + ok := object.Key("KeySchema") + if err := awsAwsjson10_serializeDocumentKeySchema(v.KeySchema, ok); err != nil { + return err + } + } + + if v.OnDemandThroughput != nil { + ok := object.Key("OnDemandThroughput") + if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughput, ok); err != nil { + return err + } + } + + if v.Projection != nil { + ok := object.Key("Projection") + if err := awsAwsjson10_serializeDocumentProjection(v.Projection, ok); err != nil { + return err + } + } + + if v.ProvisionedThroughput != nil { + ok := object.Key("ProvisionedThroughput") + if err := awsAwsjson10_serializeDocumentProvisionedThroughput(v.ProvisionedThroughput, ok); err != nil { + return err + } + } + + if v.WarmThroughput != nil { + ok := object.Key("WarmThroughput") + if err := awsAwsjson10_serializeDocumentWarmThroughput(v.WarmThroughput, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeDocumentCreateGlobalTableWitnessGroupMemberAction(v *types.CreateGlobalTableWitnessGroupMemberAction, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.RegionName != nil { + ok := object.Key("RegionName") + ok.String(*v.RegionName) + } + + return nil +} + +func awsAwsjson10_serializeDocumentCreateReplicaAction(v *types.CreateReplicaAction, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.RegionName != nil { + ok := object.Key("RegionName") + ok.String(*v.RegionName) + } + + return nil +} + +func awsAwsjson10_serializeDocumentCreateReplicationGroupMemberAction(v *types.CreateReplicationGroupMemberAction, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GlobalSecondaryIndexes != nil { + ok := object.Key("GlobalSecondaryIndexes") + if err := awsAwsjson10_serializeDocumentReplicaGlobalSecondaryIndexList(v.GlobalSecondaryIndexes, ok); err != nil { + return err + } + } + + if v.KMSMasterKeyId != nil { + ok := object.Key("KMSMasterKeyId") + ok.String(*v.KMSMasterKeyId) + } + + if v.OnDemandThroughputOverride != nil { + ok := object.Key("OnDemandThroughputOverride") + if err := awsAwsjson10_serializeDocumentOnDemandThroughputOverride(v.OnDemandThroughputOverride, ok); err != nil { + return err + } + } + + if v.ProvisionedThroughputOverride != nil { + ok := object.Key("ProvisionedThroughputOverride") + if err := awsAwsjson10_serializeDocumentProvisionedThroughputOverride(v.ProvisionedThroughputOverride, ok); err != nil { + return err + } + } + + if v.RegionName != nil { + ok := object.Key("RegionName") + ok.String(*v.RegionName) + } + + if len(v.TableClassOverride) > 0 { + ok := object.Key("TableClassOverride") + ok.String(string(v.TableClassOverride)) + } + + return nil +} + +func awsAwsjson10_serializeDocumentCsvHeaderList(v []string, value smithyjson.Value) error { + array := 
value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson10_serializeDocumentCsvOptions(v *types.CsvOptions, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Delimiter != nil { + ok := object.Key("Delimiter") + ok.String(*v.Delimiter) + } + + if v.HeaderList != nil { + ok := object.Key("HeaderList") + if err := awsAwsjson10_serializeDocumentCsvHeaderList(v.HeaderList, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeDocumentDelete(v *types.Delete, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ConditionExpression != nil { + ok := object.Key("ConditionExpression") + ok.String(*v.ConditionExpression) + } + + if v.ExpressionAttributeNames != nil { + ok := object.Key("ExpressionAttributeNames") + if err := awsAwsjson10_serializeDocumentExpressionAttributeNameMap(v.ExpressionAttributeNames, ok); err != nil { + return err + } + } + + if v.ExpressionAttributeValues != nil { + ok := object.Key("ExpressionAttributeValues") + if err := awsAwsjson10_serializeDocumentExpressionAttributeValueMap(v.ExpressionAttributeValues, ok); err != nil { + return err + } + } + + if v.Key != nil { + ok := object.Key("Key") + if err := awsAwsjson10_serializeDocumentKey(v.Key, ok); err != nil { + return err + } + } + + if len(v.ReturnValuesOnConditionCheckFailure) > 0 { + ok := object.Key("ReturnValuesOnConditionCheckFailure") + ok.String(string(v.ReturnValuesOnConditionCheckFailure)) + } + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + return nil +} + +func awsAwsjson10_serializeDocumentDeleteGlobalSecondaryIndexAction(v *types.DeleteGlobalSecondaryIndexAction, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.IndexName != nil { + ok := object.Key("IndexName") + ok.String(*v.IndexName) + } + + return nil +} + +func awsAwsjson10_serializeDocumentDeleteGlobalTableWitnessGroupMemberAction(v *types.DeleteGlobalTableWitnessGroupMemberAction, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.RegionName != nil { + ok := object.Key("RegionName") + ok.String(*v.RegionName) + } + + return nil +} + +func awsAwsjson10_serializeDocumentDeleteReplicaAction(v *types.DeleteReplicaAction, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.RegionName != nil { + ok := object.Key("RegionName") + ok.String(*v.RegionName) + } + + return nil +} + +func awsAwsjson10_serializeDocumentDeleteReplicationGroupMemberAction(v *types.DeleteReplicationGroupMemberAction, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.RegionName != nil { + ok := object.Key("RegionName") + ok.String(*v.RegionName) + } + + return nil +} + +func awsAwsjson10_serializeDocumentDeleteRequest(v *types.DeleteRequest, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Key != nil { + ok := object.Key("Key") + if err := awsAwsjson10_serializeDocumentKey(v.Key, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeDocumentEnableKinesisStreamingConfiguration(v *types.EnableKinesisStreamingConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.ApproximateCreationDateTimePrecision) > 0 { + ok := object.Key("ApproximateCreationDateTimePrecision") + 
ok.String(string(v.ApproximateCreationDateTimePrecision)) + } + + return nil +} + +func awsAwsjson10_serializeDocumentExpectedAttributeMap(v map[string]types.ExpectedAttributeValue, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + for key := range v { + om := object.Key(key) + mapVar := v[key] + if err := awsAwsjson10_serializeDocumentExpectedAttributeValue(&mapVar, om); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentExpectedAttributeValue(v *types.ExpectedAttributeValue, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AttributeValueList != nil { + ok := object.Key("AttributeValueList") + if err := awsAwsjson10_serializeDocumentAttributeValueList(v.AttributeValueList, ok); err != nil { + return err + } + } + + if len(v.ComparisonOperator) > 0 { + ok := object.Key("ComparisonOperator") + ok.String(string(v.ComparisonOperator)) + } + + if v.Exists != nil { + ok := object.Key("Exists") + ok.Boolean(*v.Exists) + } + + if v.Value != nil { + ok := object.Key("Value") + if err := awsAwsjson10_serializeDocumentAttributeValue(v.Value, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeDocumentExpressionAttributeNameMap(v map[string]string, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + for key := range v { + om := object.Key(key) + om.String(v[key]) + } + return nil +} + +func awsAwsjson10_serializeDocumentExpressionAttributeValueMap(v map[string]types.AttributeValue, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + for key := range v { + om := object.Key(key) + if vv := v[key]; vv == nil { + continue + } + if err := awsAwsjson10_serializeDocumentAttributeValue(v[key], om); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentFilterConditionMap(v map[string]types.Condition, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + for key := range v { + om := object.Key(key) + mapVar := v[key] + if err := awsAwsjson10_serializeDocumentCondition(&mapVar, om); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentGet(v *types.Get, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ExpressionAttributeNames != nil { + ok := object.Key("ExpressionAttributeNames") + if err := awsAwsjson10_serializeDocumentExpressionAttributeNameMap(v.ExpressionAttributeNames, ok); err != nil { + return err + } + } + + if v.Key != nil { + ok := object.Key("Key") + if err := awsAwsjson10_serializeDocumentKey(v.Key, ok); err != nil { + return err + } + } + + if v.ProjectionExpression != nil { + ok := object.Key("ProjectionExpression") + ok.String(*v.ProjectionExpression) + } + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + return nil +} + +func awsAwsjson10_serializeDocumentGlobalSecondaryIndex(v *types.GlobalSecondaryIndex, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.IndexName != nil { + ok := object.Key("IndexName") + ok.String(*v.IndexName) + } + + if v.KeySchema != nil { + ok := object.Key("KeySchema") + if err := awsAwsjson10_serializeDocumentKeySchema(v.KeySchema, ok); err != nil { + return err + } + } + + if v.OnDemandThroughput != nil { + ok := object.Key("OnDemandThroughput") + if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughput, ok); 
err != nil { + return err + } + } + + if v.Projection != nil { + ok := object.Key("Projection") + if err := awsAwsjson10_serializeDocumentProjection(v.Projection, ok); err != nil { + return err + } + } + + if v.ProvisionedThroughput != nil { + ok := object.Key("ProvisionedThroughput") + if err := awsAwsjson10_serializeDocumentProvisionedThroughput(v.ProvisionedThroughput, ok); err != nil { + return err + } + } + + if v.WarmThroughput != nil { + ok := object.Key("WarmThroughput") + if err := awsAwsjson10_serializeDocumentWarmThroughput(v.WarmThroughput, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeDocumentGlobalSecondaryIndexAutoScalingUpdate(v *types.GlobalSecondaryIndexAutoScalingUpdate, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.IndexName != nil { + ok := object.Key("IndexName") + ok.String(*v.IndexName) + } + + if v.ProvisionedWriteCapacityAutoScalingUpdate != nil { + ok := object.Key("ProvisionedWriteCapacityAutoScalingUpdate") + if err := awsAwsjson10_serializeDocumentAutoScalingSettingsUpdate(v.ProvisionedWriteCapacityAutoScalingUpdate, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeDocumentGlobalSecondaryIndexAutoScalingUpdateList(v []types.GlobalSecondaryIndexAutoScalingUpdate, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson10_serializeDocumentGlobalSecondaryIndexAutoScalingUpdate(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentGlobalSecondaryIndexList(v []types.GlobalSecondaryIndex, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson10_serializeDocumentGlobalSecondaryIndex(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentGlobalSecondaryIndexUpdate(v *types.GlobalSecondaryIndexUpdate, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Create != nil { + ok := object.Key("Create") + if err := awsAwsjson10_serializeDocumentCreateGlobalSecondaryIndexAction(v.Create, ok); err != nil { + return err + } + } + + if v.Delete != nil { + ok := object.Key("Delete") + if err := awsAwsjson10_serializeDocumentDeleteGlobalSecondaryIndexAction(v.Delete, ok); err != nil { + return err + } + } + + if v.Update != nil { + ok := object.Key("Update") + if err := awsAwsjson10_serializeDocumentUpdateGlobalSecondaryIndexAction(v.Update, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeDocumentGlobalSecondaryIndexUpdateList(v []types.GlobalSecondaryIndexUpdate, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson10_serializeDocumentGlobalSecondaryIndexUpdate(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentGlobalTableGlobalSecondaryIndexSettingsUpdate(v *types.GlobalTableGlobalSecondaryIndexSettingsUpdate, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.IndexName != nil { + ok := object.Key("IndexName") + ok.String(*v.IndexName) + } + + if v.ProvisionedWriteCapacityAutoScalingSettingsUpdate != nil { + ok := object.Key("ProvisionedWriteCapacityAutoScalingSettingsUpdate") + if err := 
awsAwsjson10_serializeDocumentAutoScalingSettingsUpdate(v.ProvisionedWriteCapacityAutoScalingSettingsUpdate, ok); err != nil { + return err + } + } + + if v.ProvisionedWriteCapacityUnits != nil { + ok := object.Key("ProvisionedWriteCapacityUnits") + ok.Long(*v.ProvisionedWriteCapacityUnits) + } + + return nil +} + +func awsAwsjson10_serializeDocumentGlobalTableGlobalSecondaryIndexSettingsUpdateList(v []types.GlobalTableGlobalSecondaryIndexSettingsUpdate, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson10_serializeDocumentGlobalTableGlobalSecondaryIndexSettingsUpdate(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentGlobalTableWitnessGroupUpdate(v *types.GlobalTableWitnessGroupUpdate, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Create != nil { + ok := object.Key("Create") + if err := awsAwsjson10_serializeDocumentCreateGlobalTableWitnessGroupMemberAction(v.Create, ok); err != nil { + return err + } + } + + if v.Delete != nil { + ok := object.Key("Delete") + if err := awsAwsjson10_serializeDocumentDeleteGlobalTableWitnessGroupMemberAction(v.Delete, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeDocumentGlobalTableWitnessGroupUpdateList(v []types.GlobalTableWitnessGroupUpdate, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson10_serializeDocumentGlobalTableWitnessGroupUpdate(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentIncrementalExportSpecification(v *types.IncrementalExportSpecification, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ExportFromTime != nil { + ok := object.Key("ExportFromTime") + ok.Double(smithytime.FormatEpochSeconds(*v.ExportFromTime)) + } + + if v.ExportToTime != nil { + ok := object.Key("ExportToTime") + ok.Double(smithytime.FormatEpochSeconds(*v.ExportToTime)) + } + + if len(v.ExportViewType) > 0 { + ok := object.Key("ExportViewType") + ok.String(string(v.ExportViewType)) + } + + return nil +} + +func awsAwsjson10_serializeDocumentInputFormatOptions(v *types.InputFormatOptions, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Csv != nil { + ok := object.Key("Csv") + if err := awsAwsjson10_serializeDocumentCsvOptions(v.Csv, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeDocumentKey(v map[string]types.AttributeValue, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + for key := range v { + om := object.Key(key) + if vv := v[key]; vv == nil { + continue + } + if err := awsAwsjson10_serializeDocumentAttributeValue(v[key], om); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentKeyConditions(v map[string]types.Condition, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + for key := range v { + om := object.Key(key) + mapVar := v[key] + if err := awsAwsjson10_serializeDocumentCondition(&mapVar, om); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentKeyList(v []map[string]types.AttributeValue, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if vv := v[i]; 
vv == nil { + continue + } + if err := awsAwsjson10_serializeDocumentKey(v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentKeysAndAttributes(v *types.KeysAndAttributes, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AttributesToGet != nil { + ok := object.Key("AttributesToGet") + if err := awsAwsjson10_serializeDocumentAttributeNameList(v.AttributesToGet, ok); err != nil { + return err + } + } + + if v.ConsistentRead != nil { + ok := object.Key("ConsistentRead") + ok.Boolean(*v.ConsistentRead) + } + + if v.ExpressionAttributeNames != nil { + ok := object.Key("ExpressionAttributeNames") + if err := awsAwsjson10_serializeDocumentExpressionAttributeNameMap(v.ExpressionAttributeNames, ok); err != nil { + return err + } + } + + if v.Keys != nil { + ok := object.Key("Keys") + if err := awsAwsjson10_serializeDocumentKeyList(v.Keys, ok); err != nil { + return err + } + } + + if v.ProjectionExpression != nil { + ok := object.Key("ProjectionExpression") + ok.String(*v.ProjectionExpression) + } + + return nil +} + +func awsAwsjson10_serializeDocumentKeySchema(v []types.KeySchemaElement, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson10_serializeDocumentKeySchemaElement(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentKeySchemaElement(v *types.KeySchemaElement, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AttributeName != nil { + ok := object.Key("AttributeName") + ok.String(*v.AttributeName) + } + + if len(v.KeyType) > 0 { + ok := object.Key("KeyType") + ok.String(string(v.KeyType)) + } + + return nil +} + +func awsAwsjson10_serializeDocumentListAttributeValue(v []types.AttributeValue, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if vv := v[i]; vv == nil { + continue + } + if err := awsAwsjson10_serializeDocumentAttributeValue(v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentLocalSecondaryIndex(v *types.LocalSecondaryIndex, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.IndexName != nil { + ok := object.Key("IndexName") + ok.String(*v.IndexName) + } + + if v.KeySchema != nil { + ok := object.Key("KeySchema") + if err := awsAwsjson10_serializeDocumentKeySchema(v.KeySchema, ok); err != nil { + return err + } + } + + if v.Projection != nil { + ok := object.Key("Projection") + if err := awsAwsjson10_serializeDocumentProjection(v.Projection, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeDocumentLocalSecondaryIndexList(v []types.LocalSecondaryIndex, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson10_serializeDocumentLocalSecondaryIndex(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentMapAttributeValue(v map[string]types.AttributeValue, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + for key := range v { + om := object.Key(key) + if vv := v[key]; vv == nil { + continue + } + if err := awsAwsjson10_serializeDocumentAttributeValue(v[key], om); err != nil { + return err + } + } + return nil +} + +func 
awsAwsjson10_serializeDocumentNonKeyAttributeNameList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson10_serializeDocumentNumberSetAttributeValue(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson10_serializeDocumentOnDemandThroughput(v *types.OnDemandThroughput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxReadRequestUnits != nil { + ok := object.Key("MaxReadRequestUnits") + ok.Long(*v.MaxReadRequestUnits) + } + + if v.MaxWriteRequestUnits != nil { + ok := object.Key("MaxWriteRequestUnits") + ok.Long(*v.MaxWriteRequestUnits) + } + + return nil +} + +func awsAwsjson10_serializeDocumentOnDemandThroughputOverride(v *types.OnDemandThroughputOverride, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxReadRequestUnits != nil { + ok := object.Key("MaxReadRequestUnits") + ok.Long(*v.MaxReadRequestUnits) + } + + return nil +} + +func awsAwsjson10_serializeDocumentParameterizedStatement(v *types.ParameterizedStatement, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Parameters != nil { + ok := object.Key("Parameters") + if err := awsAwsjson10_serializeDocumentPreparedStatementParameters(v.Parameters, ok); err != nil { + return err + } + } + + if len(v.ReturnValuesOnConditionCheckFailure) > 0 { + ok := object.Key("ReturnValuesOnConditionCheckFailure") + ok.String(string(v.ReturnValuesOnConditionCheckFailure)) + } + + if v.Statement != nil { + ok := object.Key("Statement") + ok.String(*v.Statement) + } + + return nil +} + +func awsAwsjson10_serializeDocumentParameterizedStatements(v []types.ParameterizedStatement, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson10_serializeDocumentParameterizedStatement(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentPartiQLBatchRequest(v []types.BatchStatementRequest, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson10_serializeDocumentBatchStatementRequest(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentPointInTimeRecoverySpecification(v *types.PointInTimeRecoverySpecification, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.PointInTimeRecoveryEnabled != nil { + ok := object.Key("PointInTimeRecoveryEnabled") + ok.Boolean(*v.PointInTimeRecoveryEnabled) + } + + if v.RecoveryPeriodInDays != nil { + ok := object.Key("RecoveryPeriodInDays") + ok.Integer(*v.RecoveryPeriodInDays) + } + + return nil +} + +func awsAwsjson10_serializeDocumentPreparedStatementParameters(v []types.AttributeValue, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if vv := v[i]; vv == nil { + continue + } + if err := awsAwsjson10_serializeDocumentAttributeValue(v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentProjection(v *types.Projection, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + 
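// Annotation on the pattern used by every awsAwsjson10_serializeDocument*
// helper in this generated file (including the Projection helper being
// defined here): open a smithyjson object or array, defer Close, and write
// only members that are non-nil pointers or non-empty enums/slices, so unset
// optional fields are omitted from the request body entirely. Illustrative
// sketch of driving one of these unexported helpers directly with the
// smithy-go JSON encoder used throughout this file (hypothetical usage, not
// generated code; values are made up):
//
//	enc := smithyjson.NewEncoder()
//	proj := &types.Projection{
//		ProjectionType:   types.ProjectionTypeInclude,
//		NonKeyAttributes: []string{"Author"},
//	}
//	if err := awsAwsjson10_serializeDocumentProjection(proj, enc.Value); err != nil {
//		return err
//	}
//	// enc.Bytes() => {"NonKeyAttributes":["Author"],"ProjectionType":"INCLUDE"}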
+ if v.NonKeyAttributes != nil { + ok := object.Key("NonKeyAttributes") + if err := awsAwsjson10_serializeDocumentNonKeyAttributeNameList(v.NonKeyAttributes, ok); err != nil { + return err + } + } + + if len(v.ProjectionType) > 0 { + ok := object.Key("ProjectionType") + ok.String(string(v.ProjectionType)) + } + + return nil +} + +func awsAwsjson10_serializeDocumentProvisionedThroughput(v *types.ProvisionedThroughput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ReadCapacityUnits != nil { + ok := object.Key("ReadCapacityUnits") + ok.Long(*v.ReadCapacityUnits) + } + + if v.WriteCapacityUnits != nil { + ok := object.Key("WriteCapacityUnits") + ok.Long(*v.WriteCapacityUnits) + } + + return nil +} + +func awsAwsjson10_serializeDocumentProvisionedThroughputOverride(v *types.ProvisionedThroughputOverride, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ReadCapacityUnits != nil { + ok := object.Key("ReadCapacityUnits") + ok.Long(*v.ReadCapacityUnits) + } + + return nil +} + +func awsAwsjson10_serializeDocumentPut(v *types.Put, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ConditionExpression != nil { + ok := object.Key("ConditionExpression") + ok.String(*v.ConditionExpression) + } + + if v.ExpressionAttributeNames != nil { + ok := object.Key("ExpressionAttributeNames") + if err := awsAwsjson10_serializeDocumentExpressionAttributeNameMap(v.ExpressionAttributeNames, ok); err != nil { + return err + } + } + + if v.ExpressionAttributeValues != nil { + ok := object.Key("ExpressionAttributeValues") + if err := awsAwsjson10_serializeDocumentExpressionAttributeValueMap(v.ExpressionAttributeValues, ok); err != nil { + return err + } + } + + if v.Item != nil { + ok := object.Key("Item") + if err := awsAwsjson10_serializeDocumentPutItemInputAttributeMap(v.Item, ok); err != nil { + return err + } + } + + if len(v.ReturnValuesOnConditionCheckFailure) > 0 { + ok := object.Key("ReturnValuesOnConditionCheckFailure") + ok.String(string(v.ReturnValuesOnConditionCheckFailure)) + } + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + return nil +} + +func awsAwsjson10_serializeDocumentPutItemInputAttributeMap(v map[string]types.AttributeValue, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + for key := range v { + om := object.Key(key) + if vv := v[key]; vv == nil { + continue + } + if err := awsAwsjson10_serializeDocumentAttributeValue(v[key], om); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentPutRequest(v *types.PutRequest, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Item != nil { + ok := object.Key("Item") + if err := awsAwsjson10_serializeDocumentPutItemInputAttributeMap(v.Item, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeDocumentReplica(v *types.Replica, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.RegionName != nil { + ok := object.Key("RegionName") + ok.String(*v.RegionName) + } + + return nil +} + +func awsAwsjson10_serializeDocumentReplicaAutoScalingUpdate(v *types.ReplicaAutoScalingUpdate, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.RegionName != nil { + ok := object.Key("RegionName") + ok.String(*v.RegionName) + } + + if v.ReplicaGlobalSecondaryIndexUpdates != nil { + ok := 
object.Key("ReplicaGlobalSecondaryIndexUpdates") + if err := awsAwsjson10_serializeDocumentReplicaGlobalSecondaryIndexAutoScalingUpdateList(v.ReplicaGlobalSecondaryIndexUpdates, ok); err != nil { + return err + } + } + + if v.ReplicaProvisionedReadCapacityAutoScalingUpdate != nil { + ok := object.Key("ReplicaProvisionedReadCapacityAutoScalingUpdate") + if err := awsAwsjson10_serializeDocumentAutoScalingSettingsUpdate(v.ReplicaProvisionedReadCapacityAutoScalingUpdate, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeDocumentReplicaAutoScalingUpdateList(v []types.ReplicaAutoScalingUpdate, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson10_serializeDocumentReplicaAutoScalingUpdate(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentReplicaGlobalSecondaryIndex(v *types.ReplicaGlobalSecondaryIndex, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.IndexName != nil { + ok := object.Key("IndexName") + ok.String(*v.IndexName) + } + + if v.OnDemandThroughputOverride != nil { + ok := object.Key("OnDemandThroughputOverride") + if err := awsAwsjson10_serializeDocumentOnDemandThroughputOverride(v.OnDemandThroughputOverride, ok); err != nil { + return err + } + } + + if v.ProvisionedThroughputOverride != nil { + ok := object.Key("ProvisionedThroughputOverride") + if err := awsAwsjson10_serializeDocumentProvisionedThroughputOverride(v.ProvisionedThroughputOverride, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeDocumentReplicaGlobalSecondaryIndexAutoScalingUpdate(v *types.ReplicaGlobalSecondaryIndexAutoScalingUpdate, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.IndexName != nil { + ok := object.Key("IndexName") + ok.String(*v.IndexName) + } + + if v.ProvisionedReadCapacityAutoScalingUpdate != nil { + ok := object.Key("ProvisionedReadCapacityAutoScalingUpdate") + if err := awsAwsjson10_serializeDocumentAutoScalingSettingsUpdate(v.ProvisionedReadCapacityAutoScalingUpdate, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeDocumentReplicaGlobalSecondaryIndexAutoScalingUpdateList(v []types.ReplicaGlobalSecondaryIndexAutoScalingUpdate, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson10_serializeDocumentReplicaGlobalSecondaryIndexAutoScalingUpdate(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentReplicaGlobalSecondaryIndexList(v []types.ReplicaGlobalSecondaryIndex, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson10_serializeDocumentReplicaGlobalSecondaryIndex(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentReplicaGlobalSecondaryIndexSettingsUpdate(v *types.ReplicaGlobalSecondaryIndexSettingsUpdate, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.IndexName != nil { + ok := object.Key("IndexName") + ok.String(*v.IndexName) + } + + if v.ProvisionedReadCapacityAutoScalingSettingsUpdate != nil { + ok := object.Key("ProvisionedReadCapacityAutoScalingSettingsUpdate") + if err := 
awsAwsjson10_serializeDocumentAutoScalingSettingsUpdate(v.ProvisionedReadCapacityAutoScalingSettingsUpdate, ok); err != nil { + return err + } + } + + if v.ProvisionedReadCapacityUnits != nil { + ok := object.Key("ProvisionedReadCapacityUnits") + ok.Long(*v.ProvisionedReadCapacityUnits) + } + + return nil +} + +func awsAwsjson10_serializeDocumentReplicaGlobalSecondaryIndexSettingsUpdateList(v []types.ReplicaGlobalSecondaryIndexSettingsUpdate, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson10_serializeDocumentReplicaGlobalSecondaryIndexSettingsUpdate(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentReplicaList(v []types.Replica, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson10_serializeDocumentReplica(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentReplicaSettingsUpdate(v *types.ReplicaSettingsUpdate, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.RegionName != nil { + ok := object.Key("RegionName") + ok.String(*v.RegionName) + } + + if v.ReplicaGlobalSecondaryIndexSettingsUpdate != nil { + ok := object.Key("ReplicaGlobalSecondaryIndexSettingsUpdate") + if err := awsAwsjson10_serializeDocumentReplicaGlobalSecondaryIndexSettingsUpdateList(v.ReplicaGlobalSecondaryIndexSettingsUpdate, ok); err != nil { + return err + } + } + + if v.ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate != nil { + ok := object.Key("ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate") + if err := awsAwsjson10_serializeDocumentAutoScalingSettingsUpdate(v.ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate, ok); err != nil { + return err + } + } + + if v.ReplicaProvisionedReadCapacityUnits != nil { + ok := object.Key("ReplicaProvisionedReadCapacityUnits") + ok.Long(*v.ReplicaProvisionedReadCapacityUnits) + } + + if len(v.ReplicaTableClass) > 0 { + ok := object.Key("ReplicaTableClass") + ok.String(string(v.ReplicaTableClass)) + } + + return nil +} + +func awsAwsjson10_serializeDocumentReplicaSettingsUpdateList(v []types.ReplicaSettingsUpdate, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson10_serializeDocumentReplicaSettingsUpdate(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentReplicationGroupUpdate(v *types.ReplicationGroupUpdate, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Create != nil { + ok := object.Key("Create") + if err := awsAwsjson10_serializeDocumentCreateReplicationGroupMemberAction(v.Create, ok); err != nil { + return err + } + } + + if v.Delete != nil { + ok := object.Key("Delete") + if err := awsAwsjson10_serializeDocumentDeleteReplicationGroupMemberAction(v.Delete, ok); err != nil { + return err + } + } + + if v.Update != nil { + ok := object.Key("Update") + if err := awsAwsjson10_serializeDocumentUpdateReplicationGroupMemberAction(v.Update, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeDocumentReplicationGroupUpdateList(v []types.ReplicationGroupUpdate, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := 
awsAwsjson10_serializeDocumentReplicationGroupUpdate(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentReplicaUpdate(v *types.ReplicaUpdate, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Create != nil { + ok := object.Key("Create") + if err := awsAwsjson10_serializeDocumentCreateReplicaAction(v.Create, ok); err != nil { + return err + } + } + + if v.Delete != nil { + ok := object.Key("Delete") + if err := awsAwsjson10_serializeDocumentDeleteReplicaAction(v.Delete, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeDocumentReplicaUpdateList(v []types.ReplicaUpdate, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson10_serializeDocumentReplicaUpdate(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentS3BucketSource(v *types.S3BucketSource, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.S3Bucket != nil { + ok := object.Key("S3Bucket") + ok.String(*v.S3Bucket) + } + + if v.S3BucketOwner != nil { + ok := object.Key("S3BucketOwner") + ok.String(*v.S3BucketOwner) + } + + if v.S3KeyPrefix != nil { + ok := object.Key("S3KeyPrefix") + ok.String(*v.S3KeyPrefix) + } + + return nil +} + +func awsAwsjson10_serializeDocumentSSESpecification(v *types.SSESpecification, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Enabled != nil { + ok := object.Key("Enabled") + ok.Boolean(*v.Enabled) + } + + if v.KMSMasterKeyId != nil { + ok := object.Key("KMSMasterKeyId") + ok.String(*v.KMSMasterKeyId) + } + + if len(v.SSEType) > 0 { + ok := object.Key("SSEType") + ok.String(string(v.SSEType)) + } + + return nil +} + +func awsAwsjson10_serializeDocumentStreamSpecification(v *types.StreamSpecification, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.StreamEnabled != nil { + ok := object.Key("StreamEnabled") + ok.Boolean(*v.StreamEnabled) + } + + if len(v.StreamViewType) > 0 { + ok := object.Key("StreamViewType") + ok.String(string(v.StreamViewType)) + } + + return nil +} + +func awsAwsjson10_serializeDocumentStringSetAttributeValue(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson10_serializeDocumentTableCreationParameters(v *types.TableCreationParameters, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AttributeDefinitions != nil { + ok := object.Key("AttributeDefinitions") + if err := awsAwsjson10_serializeDocumentAttributeDefinitions(v.AttributeDefinitions, ok); err != nil { + return err + } + } + + if len(v.BillingMode) > 0 { + ok := object.Key("BillingMode") + ok.String(string(v.BillingMode)) + } + + if v.GlobalSecondaryIndexes != nil { + ok := object.Key("GlobalSecondaryIndexes") + if err := awsAwsjson10_serializeDocumentGlobalSecondaryIndexList(v.GlobalSecondaryIndexes, ok); err != nil { + return err + } + } + + if v.KeySchema != nil { + ok := object.Key("KeySchema") + if err := awsAwsjson10_serializeDocumentKeySchema(v.KeySchema, ok); err != nil { + return err + } + } + + if v.OnDemandThroughput != nil { + ok := object.Key("OnDemandThroughput") + if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughput, ok); 
err != nil { + return err + } + } + + if v.ProvisionedThroughput != nil { + ok := object.Key("ProvisionedThroughput") + if err := awsAwsjson10_serializeDocumentProvisionedThroughput(v.ProvisionedThroughput, ok); err != nil { + return err + } + } + + if v.SSESpecification != nil { + ok := object.Key("SSESpecification") + if err := awsAwsjson10_serializeDocumentSSESpecification(v.SSESpecification, ok); err != nil { + return err + } + } + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + return nil +} + +func awsAwsjson10_serializeDocumentTag(v *types.Tag, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Key != nil { + ok := object.Key("Key") + ok.String(*v.Key) + } + + if v.Value != nil { + ok := object.Key("Value") + ok.String(*v.Value) + } + + return nil +} + +func awsAwsjson10_serializeDocumentTagKeyList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson10_serializeDocumentTagList(v []types.Tag, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson10_serializeDocumentTag(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentTimeToLiveSpecification(v *types.TimeToLiveSpecification, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AttributeName != nil { + ok := object.Key("AttributeName") + ok.String(*v.AttributeName) + } + + if v.Enabled != nil { + ok := object.Key("Enabled") + ok.Boolean(*v.Enabled) + } + + return nil +} + +func awsAwsjson10_serializeDocumentTransactGetItem(v *types.TransactGetItem, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Get != nil { + ok := object.Key("Get") + if err := awsAwsjson10_serializeDocumentGet(v.Get, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeDocumentTransactGetItemList(v []types.TransactGetItem, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson10_serializeDocumentTransactGetItem(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeDocumentTransactWriteItem(v *types.TransactWriteItem, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ConditionCheck != nil { + ok := object.Key("ConditionCheck") + if err := awsAwsjson10_serializeDocumentConditionCheck(v.ConditionCheck, ok); err != nil { + return err + } + } + + if v.Delete != nil { + ok := object.Key("Delete") + if err := awsAwsjson10_serializeDocumentDelete(v.Delete, ok); err != nil { + return err + } + } + + if v.Put != nil { + ok := object.Key("Put") + if err := awsAwsjson10_serializeDocumentPut(v.Put, ok); err != nil { + return err + } + } + + if v.Update != nil { + ok := object.Key("Update") + if err := awsAwsjson10_serializeDocumentUpdate(v.Update, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeDocumentTransactWriteItemList(v []types.TransactWriteItem, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson10_serializeDocumentTransactWriteItem(&v[i], av); err != nil { + return err + } + } + return nil +} + +func 
awsAwsjson10_serializeDocumentUpdate(v *types.Update, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ConditionExpression != nil { + ok := object.Key("ConditionExpression") + ok.String(*v.ConditionExpression) + } + + if v.ExpressionAttributeNames != nil { + ok := object.Key("ExpressionAttributeNames") + if err := awsAwsjson10_serializeDocumentExpressionAttributeNameMap(v.ExpressionAttributeNames, ok); err != nil { + return err + } + } + + if v.ExpressionAttributeValues != nil { + ok := object.Key("ExpressionAttributeValues") + if err := awsAwsjson10_serializeDocumentExpressionAttributeValueMap(v.ExpressionAttributeValues, ok); err != nil { + return err + } + } + + if v.Key != nil { + ok := object.Key("Key") + if err := awsAwsjson10_serializeDocumentKey(v.Key, ok); err != nil { + return err + } + } + + if len(v.ReturnValuesOnConditionCheckFailure) > 0 { + ok := object.Key("ReturnValuesOnConditionCheckFailure") + ok.String(string(v.ReturnValuesOnConditionCheckFailure)) + } + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + if v.UpdateExpression != nil { + ok := object.Key("UpdateExpression") + ok.String(*v.UpdateExpression) + } + + return nil +} + +func awsAwsjson10_serializeDocumentUpdateGlobalSecondaryIndexAction(v *types.UpdateGlobalSecondaryIndexAction, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.IndexName != nil { + ok := object.Key("IndexName") + ok.String(*v.IndexName) + } + + if v.OnDemandThroughput != nil { + ok := object.Key("OnDemandThroughput") + if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughput, ok); err != nil { + return err + } + } + + if v.ProvisionedThroughput != nil { + ok := object.Key("ProvisionedThroughput") + if err := awsAwsjson10_serializeDocumentProvisionedThroughput(v.ProvisionedThroughput, ok); err != nil { + return err + } + } + + if v.WarmThroughput != nil { + ok := object.Key("WarmThroughput") + if err := awsAwsjson10_serializeDocumentWarmThroughput(v.WarmThroughput, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeDocumentUpdateKinesisStreamingConfiguration(v *types.UpdateKinesisStreamingConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.ApproximateCreationDateTimePrecision) > 0 { + ok := object.Key("ApproximateCreationDateTimePrecision") + ok.String(string(v.ApproximateCreationDateTimePrecision)) + } + + return nil +} + +func awsAwsjson10_serializeDocumentUpdateReplicationGroupMemberAction(v *types.UpdateReplicationGroupMemberAction, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GlobalSecondaryIndexes != nil { + ok := object.Key("GlobalSecondaryIndexes") + if err := awsAwsjson10_serializeDocumentReplicaGlobalSecondaryIndexList(v.GlobalSecondaryIndexes, ok); err != nil { + return err + } + } + + if v.KMSMasterKeyId != nil { + ok := object.Key("KMSMasterKeyId") + ok.String(*v.KMSMasterKeyId) + } + + if v.OnDemandThroughputOverride != nil { + ok := object.Key("OnDemandThroughputOverride") + if err := awsAwsjson10_serializeDocumentOnDemandThroughputOverride(v.OnDemandThroughputOverride, ok); err != nil { + return err + } + } + + if v.ProvisionedThroughputOverride != nil { + ok := object.Key("ProvisionedThroughputOverride") + if err := awsAwsjson10_serializeDocumentProvisionedThroughputOverride(v.ProvisionedThroughputOverride, ok); err != nil { + return err + } + } 
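// Annotation: the awsAwsjson10_serializeOp* HandleSerialize middleware earlier
// in this file all emit the same awsJson1.0 request shape — POST to the
// operation path "/", Content-Type "application/x-amz-json-1.0", an
// X-Amz-Target header of the form "DynamoDB_20120810.<Operation>", and the
// serialized op document as the body. A hedged sketch of the wire request an
// UpdateTable call would produce (field order and values illustrative only):
//
//	POST / HTTP/1.1
//	Content-Type: application/x-amz-json-1.0
//	X-Amz-Target: DynamoDB_20120810.UpdateTable
//
//	{"ProvisionedThroughput":{"ReadCapacityUnits":5,"WriteCapacityUnits":5},"TableName":"Music"}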
+ + if v.RegionName != nil { + ok := object.Key("RegionName") + ok.String(*v.RegionName) + } + + if len(v.TableClassOverride) > 0 { + ok := object.Key("TableClassOverride") + ok.String(string(v.TableClassOverride)) + } + + return nil +} + +func awsAwsjson10_serializeDocumentWarmThroughput(v *types.WarmThroughput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ReadUnitsPerSecond != nil { + ok := object.Key("ReadUnitsPerSecond") + ok.Long(*v.ReadUnitsPerSecond) + } + + if v.WriteUnitsPerSecond != nil { + ok := object.Key("WriteUnitsPerSecond") + ok.Long(*v.WriteUnitsPerSecond) + } + + return nil +} + +func awsAwsjson10_serializeDocumentWriteRequest(v *types.WriteRequest, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DeleteRequest != nil { + ok := object.Key("DeleteRequest") + if err := awsAwsjson10_serializeDocumentDeleteRequest(v.DeleteRequest, ok); err != nil { + return err + } + } + + if v.PutRequest != nil { + ok := object.Key("PutRequest") + if err := awsAwsjson10_serializeDocumentPutRequest(v.PutRequest, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeDocumentWriteRequests(v []types.WriteRequest, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson10_serializeDocumentWriteRequest(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson10_serializeOpDocumentBatchExecuteStatementInput(v *BatchExecuteStatementInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.ReturnConsumedCapacity) > 0 { + ok := object.Key("ReturnConsumedCapacity") + ok.String(string(v.ReturnConsumedCapacity)) + } + + if v.Statements != nil { + ok := object.Key("Statements") + if err := awsAwsjson10_serializeDocumentPartiQLBatchRequest(v.Statements, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentBatchGetItemInput(v *BatchGetItemInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.RequestItems != nil { + ok := object.Key("RequestItems") + if err := awsAwsjson10_serializeDocumentBatchGetRequestMap(v.RequestItems, ok); err != nil { + return err + } + } + + if len(v.ReturnConsumedCapacity) > 0 { + ok := object.Key("ReturnConsumedCapacity") + ok.String(string(v.ReturnConsumedCapacity)) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentBatchWriteItemInput(v *BatchWriteItemInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.RequestItems != nil { + ok := object.Key("RequestItems") + if err := awsAwsjson10_serializeDocumentBatchWriteItemRequestMap(v.RequestItems, ok); err != nil { + return err + } + } + + if len(v.ReturnConsumedCapacity) > 0 { + ok := object.Key("ReturnConsumedCapacity") + ok.String(string(v.ReturnConsumedCapacity)) + } + + if len(v.ReturnItemCollectionMetrics) > 0 { + ok := object.Key("ReturnItemCollectionMetrics") + ok.String(string(v.ReturnItemCollectionMetrics)) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentCreateBackupInput(v *CreateBackupInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.BackupName != nil { + ok := object.Key("BackupName") + ok.String(*v.BackupName) + } + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + return nil +} + +func 
awsAwsjson10_serializeOpDocumentCreateGlobalTableInput(v *CreateGlobalTableInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GlobalTableName != nil { + ok := object.Key("GlobalTableName") + ok.String(*v.GlobalTableName) + } + + if v.ReplicationGroup != nil { + ok := object.Key("ReplicationGroup") + if err := awsAwsjson10_serializeDocumentReplicaList(v.ReplicationGroup, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentCreateTableInput(v *CreateTableInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AttributeDefinitions != nil { + ok := object.Key("AttributeDefinitions") + if err := awsAwsjson10_serializeDocumentAttributeDefinitions(v.AttributeDefinitions, ok); err != nil { + return err + } + } + + if len(v.BillingMode) > 0 { + ok := object.Key("BillingMode") + ok.String(string(v.BillingMode)) + } + + if v.DeletionProtectionEnabled != nil { + ok := object.Key("DeletionProtectionEnabled") + ok.Boolean(*v.DeletionProtectionEnabled) + } + + if v.GlobalSecondaryIndexes != nil { + ok := object.Key("GlobalSecondaryIndexes") + if err := awsAwsjson10_serializeDocumentGlobalSecondaryIndexList(v.GlobalSecondaryIndexes, ok); err != nil { + return err + } + } + + if v.KeySchema != nil { + ok := object.Key("KeySchema") + if err := awsAwsjson10_serializeDocumentKeySchema(v.KeySchema, ok); err != nil { + return err + } + } + + if v.LocalSecondaryIndexes != nil { + ok := object.Key("LocalSecondaryIndexes") + if err := awsAwsjson10_serializeDocumentLocalSecondaryIndexList(v.LocalSecondaryIndexes, ok); err != nil { + return err + } + } + + if v.OnDemandThroughput != nil { + ok := object.Key("OnDemandThroughput") + if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughput, ok); err != nil { + return err + } + } + + if v.ProvisionedThroughput != nil { + ok := object.Key("ProvisionedThroughput") + if err := awsAwsjson10_serializeDocumentProvisionedThroughput(v.ProvisionedThroughput, ok); err != nil { + return err + } + } + + if v.ResourcePolicy != nil { + ok := object.Key("ResourcePolicy") + ok.String(*v.ResourcePolicy) + } + + if v.SSESpecification != nil { + ok := object.Key("SSESpecification") + if err := awsAwsjson10_serializeDocumentSSESpecification(v.SSESpecification, ok); err != nil { + return err + } + } + + if v.StreamSpecification != nil { + ok := object.Key("StreamSpecification") + if err := awsAwsjson10_serializeDocumentStreamSpecification(v.StreamSpecification, ok); err != nil { + return err + } + } + + if len(v.TableClass) > 0 { + ok := object.Key("TableClass") + ok.String(string(v.TableClass)) + } + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + if v.Tags != nil { + ok := object.Key("Tags") + if err := awsAwsjson10_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + if v.WarmThroughput != nil { + ok := object.Key("WarmThroughput") + if err := awsAwsjson10_serializeDocumentWarmThroughput(v.WarmThroughput, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentDeleteBackupInput(v *DeleteBackupInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.BackupArn != nil { + ok := object.Key("BackupArn") + ok.String(*v.BackupArn) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentDeleteItemInput(v *DeleteItemInput, value smithyjson.Value) error { + object := value.Object() + defer 
object.Close() + + if len(v.ConditionalOperator) > 0 { + ok := object.Key("ConditionalOperator") + ok.String(string(v.ConditionalOperator)) + } + + if v.ConditionExpression != nil { + ok := object.Key("ConditionExpression") + ok.String(*v.ConditionExpression) + } + + if v.Expected != nil { + ok := object.Key("Expected") + if err := awsAwsjson10_serializeDocumentExpectedAttributeMap(v.Expected, ok); err != nil { + return err + } + } + + if v.ExpressionAttributeNames != nil { + ok := object.Key("ExpressionAttributeNames") + if err := awsAwsjson10_serializeDocumentExpressionAttributeNameMap(v.ExpressionAttributeNames, ok); err != nil { + return err + } + } + + if v.ExpressionAttributeValues != nil { + ok := object.Key("ExpressionAttributeValues") + if err := awsAwsjson10_serializeDocumentExpressionAttributeValueMap(v.ExpressionAttributeValues, ok); err != nil { + return err + } + } + + if v.Key != nil { + ok := object.Key("Key") + if err := awsAwsjson10_serializeDocumentKey(v.Key, ok); err != nil { + return err + } + } + + if len(v.ReturnConsumedCapacity) > 0 { + ok := object.Key("ReturnConsumedCapacity") + ok.String(string(v.ReturnConsumedCapacity)) + } + + if len(v.ReturnItemCollectionMetrics) > 0 { + ok := object.Key("ReturnItemCollectionMetrics") + ok.String(string(v.ReturnItemCollectionMetrics)) + } + + if len(v.ReturnValues) > 0 { + ok := object.Key("ReturnValues") + ok.String(string(v.ReturnValues)) + } + + if len(v.ReturnValuesOnConditionCheckFailure) > 0 { + ok := object.Key("ReturnValuesOnConditionCheckFailure") + ok.String(string(v.ReturnValuesOnConditionCheckFailure)) + } + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentDeleteResourcePolicyInput(v *DeleteResourcePolicyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ExpectedRevisionId != nil { + ok := object.Key("ExpectedRevisionId") + ok.String(*v.ExpectedRevisionId) + } + + if v.ResourceArn != nil { + ok := object.Key("ResourceArn") + ok.String(*v.ResourceArn) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentDeleteTableInput(v *DeleteTableInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentDescribeBackupInput(v *DescribeBackupInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.BackupArn != nil { + ok := object.Key("BackupArn") + ok.String(*v.BackupArn) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentDescribeContinuousBackupsInput(v *DescribeContinuousBackupsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentDescribeContributorInsightsInput(v *DescribeContributorInsightsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.IndexName != nil { + ok := object.Key("IndexName") + ok.String(*v.IndexName) + } + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentDescribeEndpointsInput(v *DescribeEndpointsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + return nil +} + +func 
awsAwsjson10_serializeOpDocumentDescribeExportInput(v *DescribeExportInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ExportArn != nil { + ok := object.Key("ExportArn") + ok.String(*v.ExportArn) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentDescribeGlobalTableInput(v *DescribeGlobalTableInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GlobalTableName != nil { + ok := object.Key("GlobalTableName") + ok.String(*v.GlobalTableName) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentDescribeGlobalTableSettingsInput(v *DescribeGlobalTableSettingsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GlobalTableName != nil { + ok := object.Key("GlobalTableName") + ok.String(*v.GlobalTableName) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentDescribeImportInput(v *DescribeImportInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ImportArn != nil { + ok := object.Key("ImportArn") + ok.String(*v.ImportArn) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentDescribeKinesisStreamingDestinationInput(v *DescribeKinesisStreamingDestinationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentDescribeLimitsInput(v *DescribeLimitsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + return nil +} + +func awsAwsjson10_serializeOpDocumentDescribeTableInput(v *DescribeTableInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentDescribeTableReplicaAutoScalingInput(v *DescribeTableReplicaAutoScalingInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentDescribeTimeToLiveInput(v *DescribeTimeToLiveInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentDisableKinesisStreamingDestinationInput(v *DisableKinesisStreamingDestinationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.EnableKinesisStreamingConfiguration != nil { + ok := object.Key("EnableKinesisStreamingConfiguration") + if err := awsAwsjson10_serializeDocumentEnableKinesisStreamingConfiguration(v.EnableKinesisStreamingConfiguration, ok); err != nil { + return err + } + } + + if v.StreamArn != nil { + ok := object.Key("StreamArn") + ok.String(*v.StreamArn) + } + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentEnableKinesisStreamingDestinationInput(v *EnableKinesisStreamingDestinationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.EnableKinesisStreamingConfiguration != nil { + ok := object.Key("EnableKinesisStreamingConfiguration") + if err := 
awsAwsjson10_serializeDocumentEnableKinesisStreamingConfiguration(v.EnableKinesisStreamingConfiguration, ok); err != nil { + return err + } + } + + if v.StreamArn != nil { + ok := object.Key("StreamArn") + ok.String(*v.StreamArn) + } + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentExecuteStatementInput(v *ExecuteStatementInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ConsistentRead != nil { + ok := object.Key("ConsistentRead") + ok.Boolean(*v.ConsistentRead) + } + + if v.Limit != nil { + ok := object.Key("Limit") + ok.Integer(*v.Limit) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if v.Parameters != nil { + ok := object.Key("Parameters") + if err := awsAwsjson10_serializeDocumentPreparedStatementParameters(v.Parameters, ok); err != nil { + return err + } + } + + if len(v.ReturnConsumedCapacity) > 0 { + ok := object.Key("ReturnConsumedCapacity") + ok.String(string(v.ReturnConsumedCapacity)) + } + + if len(v.ReturnValuesOnConditionCheckFailure) > 0 { + ok := object.Key("ReturnValuesOnConditionCheckFailure") + ok.String(string(v.ReturnValuesOnConditionCheckFailure)) + } + + if v.Statement != nil { + ok := object.Key("Statement") + ok.String(*v.Statement) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentExecuteTransactionInput(v *ExecuteTransactionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientRequestToken != nil { + ok := object.Key("ClientRequestToken") + ok.String(*v.ClientRequestToken) + } + + if len(v.ReturnConsumedCapacity) > 0 { + ok := object.Key("ReturnConsumedCapacity") + ok.String(string(v.ReturnConsumedCapacity)) + } + + if v.TransactStatements != nil { + ok := object.Key("TransactStatements") + if err := awsAwsjson10_serializeDocumentParameterizedStatements(v.TransactStatements, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentExportTableToPointInTimeInput(v *ExportTableToPointInTimeInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientToken != nil { + ok := object.Key("ClientToken") + ok.String(*v.ClientToken) + } + + if len(v.ExportFormat) > 0 { + ok := object.Key("ExportFormat") + ok.String(string(v.ExportFormat)) + } + + if v.ExportTime != nil { + ok := object.Key("ExportTime") + ok.Double(smithytime.FormatEpochSeconds(*v.ExportTime)) + } + + if len(v.ExportType) > 0 { + ok := object.Key("ExportType") + ok.String(string(v.ExportType)) + } + + if v.IncrementalExportSpecification != nil { + ok := object.Key("IncrementalExportSpecification") + if err := awsAwsjson10_serializeDocumentIncrementalExportSpecification(v.IncrementalExportSpecification, ok); err != nil { + return err + } + } + + if v.S3Bucket != nil { + ok := object.Key("S3Bucket") + ok.String(*v.S3Bucket) + } + + if v.S3BucketOwner != nil { + ok := object.Key("S3BucketOwner") + ok.String(*v.S3BucketOwner) + } + + if v.S3Prefix != nil { + ok := object.Key("S3Prefix") + ok.String(*v.S3Prefix) + } + + if len(v.S3SseAlgorithm) > 0 { + ok := object.Key("S3SseAlgorithm") + ok.String(string(v.S3SseAlgorithm)) + } + + if v.S3SseKmsKeyId != nil { + ok := object.Key("S3SseKmsKeyId") + ok.String(*v.S3SseKmsKeyId) + } + + if v.TableArn != nil { + ok := object.Key("TableArn") + ok.String(*v.TableArn) + } + + return nil +} + +func 
awsAwsjson10_serializeOpDocumentGetItemInput(v *GetItemInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AttributesToGet != nil { + ok := object.Key("AttributesToGet") + if err := awsAwsjson10_serializeDocumentAttributeNameList(v.AttributesToGet, ok); err != nil { + return err + } + } + + if v.ConsistentRead != nil { + ok := object.Key("ConsistentRead") + ok.Boolean(*v.ConsistentRead) + } + + if v.ExpressionAttributeNames != nil { + ok := object.Key("ExpressionAttributeNames") + if err := awsAwsjson10_serializeDocumentExpressionAttributeNameMap(v.ExpressionAttributeNames, ok); err != nil { + return err + } + } + + if v.Key != nil { + ok := object.Key("Key") + if err := awsAwsjson10_serializeDocumentKey(v.Key, ok); err != nil { + return err + } + } + + if v.ProjectionExpression != nil { + ok := object.Key("ProjectionExpression") + ok.String(*v.ProjectionExpression) + } + + if len(v.ReturnConsumedCapacity) > 0 { + ok := object.Key("ReturnConsumedCapacity") + ok.String(string(v.ReturnConsumedCapacity)) + } + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentGetResourcePolicyInput(v *GetResourcePolicyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ResourceArn != nil { + ok := object.Key("ResourceArn") + ok.String(*v.ResourceArn) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentImportTableInput(v *ImportTableInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientToken != nil { + ok := object.Key("ClientToken") + ok.String(*v.ClientToken) + } + + if len(v.InputCompressionType) > 0 { + ok := object.Key("InputCompressionType") + ok.String(string(v.InputCompressionType)) + } + + if len(v.InputFormat) > 0 { + ok := object.Key("InputFormat") + ok.String(string(v.InputFormat)) + } + + if v.InputFormatOptions != nil { + ok := object.Key("InputFormatOptions") + if err := awsAwsjson10_serializeDocumentInputFormatOptions(v.InputFormatOptions, ok); err != nil { + return err + } + } + + if v.S3BucketSource != nil { + ok := object.Key("S3BucketSource") + if err := awsAwsjson10_serializeDocumentS3BucketSource(v.S3BucketSource, ok); err != nil { + return err + } + } + + if v.TableCreationParameters != nil { + ok := object.Key("TableCreationParameters") + if err := awsAwsjson10_serializeDocumentTableCreationParameters(v.TableCreationParameters, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentListBackupsInput(v *ListBackupsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.BackupType) > 0 { + ok := object.Key("BackupType") + ok.String(string(v.BackupType)) + } + + if v.ExclusiveStartBackupArn != nil { + ok := object.Key("ExclusiveStartBackupArn") + ok.String(*v.ExclusiveStartBackupArn) + } + + if v.Limit != nil { + ok := object.Key("Limit") + ok.Integer(*v.Limit) + } + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + if v.TimeRangeLowerBound != nil { + ok := object.Key("TimeRangeLowerBound") + ok.Double(smithytime.FormatEpochSeconds(*v.TimeRangeLowerBound)) + } + + if v.TimeRangeUpperBound != nil { + ok := object.Key("TimeRangeUpperBound") + ok.Double(smithytime.FormatEpochSeconds(*v.TimeRangeUpperBound)) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentListContributorInsightsInput(v 
*ListContributorInsightsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxResults != 0 { + ok := object.Key("MaxResults") + ok.Integer(v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentListExportsInput(v *ListExportsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if v.TableArn != nil { + ok := object.Key("TableArn") + ok.String(*v.TableArn) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentListGlobalTablesInput(v *ListGlobalTablesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ExclusiveStartGlobalTableName != nil { + ok := object.Key("ExclusiveStartGlobalTableName") + ok.String(*v.ExclusiveStartGlobalTableName) + } + + if v.Limit != nil { + ok := object.Key("Limit") + ok.Integer(*v.Limit) + } + + if v.RegionName != nil { + ok := object.Key("RegionName") + ok.String(*v.RegionName) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentListImportsInput(v *ListImportsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if v.PageSize != nil { + ok := object.Key("PageSize") + ok.Integer(*v.PageSize) + } + + if v.TableArn != nil { + ok := object.Key("TableArn") + ok.String(*v.TableArn) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentListTablesInput(v *ListTablesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ExclusiveStartTableName != nil { + ok := object.Key("ExclusiveStartTableName") + ok.String(*v.ExclusiveStartTableName) + } + + if v.Limit != nil { + ok := object.Key("Limit") + ok.Integer(*v.Limit) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentListTagsOfResourceInput(v *ListTagsOfResourceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if v.ResourceArn != nil { + ok := object.Key("ResourceArn") + ok.String(*v.ResourceArn) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentPutItemInput(v *PutItemInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.ConditionalOperator) > 0 { + ok := object.Key("ConditionalOperator") + ok.String(string(v.ConditionalOperator)) + } + + if v.ConditionExpression != nil { + ok := object.Key("ConditionExpression") + ok.String(*v.ConditionExpression) + } + + if v.Expected != nil { + ok := object.Key("Expected") + if err := awsAwsjson10_serializeDocumentExpectedAttributeMap(v.Expected, ok); err != nil { + return err + } + } + + if v.ExpressionAttributeNames != nil { + ok := object.Key("ExpressionAttributeNames") + if err := awsAwsjson10_serializeDocumentExpressionAttributeNameMap(v.ExpressionAttributeNames, ok); err != nil { + return err + } + } + + if v.ExpressionAttributeValues != nil { + ok := object.Key("ExpressionAttributeValues") + if err := 
awsAwsjson10_serializeDocumentExpressionAttributeValueMap(v.ExpressionAttributeValues, ok); err != nil { + return err + } + } + + if v.Item != nil { + ok := object.Key("Item") + if err := awsAwsjson10_serializeDocumentPutItemInputAttributeMap(v.Item, ok); err != nil { + return err + } + } + + if len(v.ReturnConsumedCapacity) > 0 { + ok := object.Key("ReturnConsumedCapacity") + ok.String(string(v.ReturnConsumedCapacity)) + } + + if len(v.ReturnItemCollectionMetrics) > 0 { + ok := object.Key("ReturnItemCollectionMetrics") + ok.String(string(v.ReturnItemCollectionMetrics)) + } + + if len(v.ReturnValues) > 0 { + ok := object.Key("ReturnValues") + ok.String(string(v.ReturnValues)) + } + + if len(v.ReturnValuesOnConditionCheckFailure) > 0 { + ok := object.Key("ReturnValuesOnConditionCheckFailure") + ok.String(string(v.ReturnValuesOnConditionCheckFailure)) + } + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentPutResourcePolicyInput(v *PutResourcePolicyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ConfirmRemoveSelfResourceAccess { + ok := object.Key("ConfirmRemoveSelfResourceAccess") + ok.Boolean(v.ConfirmRemoveSelfResourceAccess) + } + + if v.ExpectedRevisionId != nil { + ok := object.Key("ExpectedRevisionId") + ok.String(*v.ExpectedRevisionId) + } + + if v.Policy != nil { + ok := object.Key("Policy") + ok.String(*v.Policy) + } + + if v.ResourceArn != nil { + ok := object.Key("ResourceArn") + ok.String(*v.ResourceArn) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentQueryInput(v *QueryInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AttributesToGet != nil { + ok := object.Key("AttributesToGet") + if err := awsAwsjson10_serializeDocumentAttributeNameList(v.AttributesToGet, ok); err != nil { + return err + } + } + + if len(v.ConditionalOperator) > 0 { + ok := object.Key("ConditionalOperator") + ok.String(string(v.ConditionalOperator)) + } + + if v.ConsistentRead != nil { + ok := object.Key("ConsistentRead") + ok.Boolean(*v.ConsistentRead) + } + + if v.ExclusiveStartKey != nil { + ok := object.Key("ExclusiveStartKey") + if err := awsAwsjson10_serializeDocumentKey(v.ExclusiveStartKey, ok); err != nil { + return err + } + } + + if v.ExpressionAttributeNames != nil { + ok := object.Key("ExpressionAttributeNames") + if err := awsAwsjson10_serializeDocumentExpressionAttributeNameMap(v.ExpressionAttributeNames, ok); err != nil { + return err + } + } + + if v.ExpressionAttributeValues != nil { + ok := object.Key("ExpressionAttributeValues") + if err := awsAwsjson10_serializeDocumentExpressionAttributeValueMap(v.ExpressionAttributeValues, ok); err != nil { + return err + } + } + + if v.FilterExpression != nil { + ok := object.Key("FilterExpression") + ok.String(*v.FilterExpression) + } + + if v.IndexName != nil { + ok := object.Key("IndexName") + ok.String(*v.IndexName) + } + + if v.KeyConditionExpression != nil { + ok := object.Key("KeyConditionExpression") + ok.String(*v.KeyConditionExpression) + } + + if v.KeyConditions != nil { + ok := object.Key("KeyConditions") + if err := awsAwsjson10_serializeDocumentKeyConditions(v.KeyConditions, ok); err != nil { + return err + } + } + + if v.Limit != nil { + ok := object.Key("Limit") + ok.Integer(*v.Limit) + } + + if v.ProjectionExpression != nil { + ok := object.Key("ProjectionExpression") + ok.String(*v.ProjectionExpression) + } + + if v.QueryFilter != 
nil { + ok := object.Key("QueryFilter") + if err := awsAwsjson10_serializeDocumentFilterConditionMap(v.QueryFilter, ok); err != nil { + return err + } + } + + if len(v.ReturnConsumedCapacity) > 0 { + ok := object.Key("ReturnConsumedCapacity") + ok.String(string(v.ReturnConsumedCapacity)) + } + + if v.ScanIndexForward != nil { + ok := object.Key("ScanIndexForward") + ok.Boolean(*v.ScanIndexForward) + } + + if len(v.Select) > 0 { + ok := object.Key("Select") + ok.String(string(v.Select)) + } + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentRestoreTableFromBackupInput(v *RestoreTableFromBackupInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.BackupArn != nil { + ok := object.Key("BackupArn") + ok.String(*v.BackupArn) + } + + if len(v.BillingModeOverride) > 0 { + ok := object.Key("BillingModeOverride") + ok.String(string(v.BillingModeOverride)) + } + + if v.GlobalSecondaryIndexOverride != nil { + ok := object.Key("GlobalSecondaryIndexOverride") + if err := awsAwsjson10_serializeDocumentGlobalSecondaryIndexList(v.GlobalSecondaryIndexOverride, ok); err != nil { + return err + } + } + + if v.LocalSecondaryIndexOverride != nil { + ok := object.Key("LocalSecondaryIndexOverride") + if err := awsAwsjson10_serializeDocumentLocalSecondaryIndexList(v.LocalSecondaryIndexOverride, ok); err != nil { + return err + } + } + + if v.OnDemandThroughputOverride != nil { + ok := object.Key("OnDemandThroughputOverride") + if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughputOverride, ok); err != nil { + return err + } + } + + if v.ProvisionedThroughputOverride != nil { + ok := object.Key("ProvisionedThroughputOverride") + if err := awsAwsjson10_serializeDocumentProvisionedThroughput(v.ProvisionedThroughputOverride, ok); err != nil { + return err + } + } + + if v.SSESpecificationOverride != nil { + ok := object.Key("SSESpecificationOverride") + if err := awsAwsjson10_serializeDocumentSSESpecification(v.SSESpecificationOverride, ok); err != nil { + return err + } + } + + if v.TargetTableName != nil { + ok := object.Key("TargetTableName") + ok.String(*v.TargetTableName) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentRestoreTableToPointInTimeInput(v *RestoreTableToPointInTimeInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.BillingModeOverride) > 0 { + ok := object.Key("BillingModeOverride") + ok.String(string(v.BillingModeOverride)) + } + + if v.GlobalSecondaryIndexOverride != nil { + ok := object.Key("GlobalSecondaryIndexOverride") + if err := awsAwsjson10_serializeDocumentGlobalSecondaryIndexList(v.GlobalSecondaryIndexOverride, ok); err != nil { + return err + } + } + + if v.LocalSecondaryIndexOverride != nil { + ok := object.Key("LocalSecondaryIndexOverride") + if err := awsAwsjson10_serializeDocumentLocalSecondaryIndexList(v.LocalSecondaryIndexOverride, ok); err != nil { + return err + } + } + + if v.OnDemandThroughputOverride != nil { + ok := object.Key("OnDemandThroughputOverride") + if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughputOverride, ok); err != nil { + return err + } + } + + if v.ProvisionedThroughputOverride != nil { + ok := object.Key("ProvisionedThroughputOverride") + if err := awsAwsjson10_serializeDocumentProvisionedThroughput(v.ProvisionedThroughputOverride, ok); err != nil { + return err + } + } + + if v.RestoreDateTime != nil { + 
ok := object.Key("RestoreDateTime") + ok.Double(smithytime.FormatEpochSeconds(*v.RestoreDateTime)) + } + + if v.SourceTableArn != nil { + ok := object.Key("SourceTableArn") + ok.String(*v.SourceTableArn) + } + + if v.SourceTableName != nil { + ok := object.Key("SourceTableName") + ok.String(*v.SourceTableName) + } + + if v.SSESpecificationOverride != nil { + ok := object.Key("SSESpecificationOverride") + if err := awsAwsjson10_serializeDocumentSSESpecification(v.SSESpecificationOverride, ok); err != nil { + return err + } + } + + if v.TargetTableName != nil { + ok := object.Key("TargetTableName") + ok.String(*v.TargetTableName) + } + + if v.UseLatestRestorableTime != nil { + ok := object.Key("UseLatestRestorableTime") + ok.Boolean(*v.UseLatestRestorableTime) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentScanInput(v *ScanInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AttributesToGet != nil { + ok := object.Key("AttributesToGet") + if err := awsAwsjson10_serializeDocumentAttributeNameList(v.AttributesToGet, ok); err != nil { + return err + } + } + + if len(v.ConditionalOperator) > 0 { + ok := object.Key("ConditionalOperator") + ok.String(string(v.ConditionalOperator)) + } + + if v.ConsistentRead != nil { + ok := object.Key("ConsistentRead") + ok.Boolean(*v.ConsistentRead) + } + + if v.ExclusiveStartKey != nil { + ok := object.Key("ExclusiveStartKey") + if err := awsAwsjson10_serializeDocumentKey(v.ExclusiveStartKey, ok); err != nil { + return err + } + } + + if v.ExpressionAttributeNames != nil { + ok := object.Key("ExpressionAttributeNames") + if err := awsAwsjson10_serializeDocumentExpressionAttributeNameMap(v.ExpressionAttributeNames, ok); err != nil { + return err + } + } + + if v.ExpressionAttributeValues != nil { + ok := object.Key("ExpressionAttributeValues") + if err := awsAwsjson10_serializeDocumentExpressionAttributeValueMap(v.ExpressionAttributeValues, ok); err != nil { + return err + } + } + + if v.FilterExpression != nil { + ok := object.Key("FilterExpression") + ok.String(*v.FilterExpression) + } + + if v.IndexName != nil { + ok := object.Key("IndexName") + ok.String(*v.IndexName) + } + + if v.Limit != nil { + ok := object.Key("Limit") + ok.Integer(*v.Limit) + } + + if v.ProjectionExpression != nil { + ok := object.Key("ProjectionExpression") + ok.String(*v.ProjectionExpression) + } + + if len(v.ReturnConsumedCapacity) > 0 { + ok := object.Key("ReturnConsumedCapacity") + ok.String(string(v.ReturnConsumedCapacity)) + } + + if v.ScanFilter != nil { + ok := object.Key("ScanFilter") + if err := awsAwsjson10_serializeDocumentFilterConditionMap(v.ScanFilter, ok); err != nil { + return err + } + } + + if v.Segment != nil { + ok := object.Key("Segment") + ok.Integer(*v.Segment) + } + + if len(v.Select) > 0 { + ok := object.Key("Select") + ok.String(string(v.Select)) + } + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + if v.TotalSegments != nil { + ok := object.Key("TotalSegments") + ok.Integer(*v.TotalSegments) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentTagResourceInput(v *TagResourceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ResourceArn != nil { + ok := object.Key("ResourceArn") + ok.String(*v.ResourceArn) + } + + if v.Tags != nil { + ok := object.Key("Tags") + if err := awsAwsjson10_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + +func 
awsAwsjson10_serializeOpDocumentTransactGetItemsInput(v *TransactGetItemsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.ReturnConsumedCapacity) > 0 { + ok := object.Key("ReturnConsumedCapacity") + ok.String(string(v.ReturnConsumedCapacity)) + } + + if v.TransactItems != nil { + ok := object.Key("TransactItems") + if err := awsAwsjson10_serializeDocumentTransactGetItemList(v.TransactItems, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentTransactWriteItemsInput(v *TransactWriteItemsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientRequestToken != nil { + ok := object.Key("ClientRequestToken") + ok.String(*v.ClientRequestToken) + } + + if len(v.ReturnConsumedCapacity) > 0 { + ok := object.Key("ReturnConsumedCapacity") + ok.String(string(v.ReturnConsumedCapacity)) + } + + if len(v.ReturnItemCollectionMetrics) > 0 { + ok := object.Key("ReturnItemCollectionMetrics") + ok.String(string(v.ReturnItemCollectionMetrics)) + } + + if v.TransactItems != nil { + ok := object.Key("TransactItems") + if err := awsAwsjson10_serializeDocumentTransactWriteItemList(v.TransactItems, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentUntagResourceInput(v *UntagResourceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ResourceArn != nil { + ok := object.Key("ResourceArn") + ok.String(*v.ResourceArn) + } + + if v.TagKeys != nil { + ok := object.Key("TagKeys") + if err := awsAwsjson10_serializeDocumentTagKeyList(v.TagKeys, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentUpdateContinuousBackupsInput(v *UpdateContinuousBackupsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.PointInTimeRecoverySpecification != nil { + ok := object.Key("PointInTimeRecoverySpecification") + if err := awsAwsjson10_serializeDocumentPointInTimeRecoverySpecification(v.PointInTimeRecoverySpecification, ok); err != nil { + return err + } + } + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentUpdateContributorInsightsInput(v *UpdateContributorInsightsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.ContributorInsightsAction) > 0 { + ok := object.Key("ContributorInsightsAction") + ok.String(string(v.ContributorInsightsAction)) + } + + if len(v.ContributorInsightsMode) > 0 { + ok := object.Key("ContributorInsightsMode") + ok.String(string(v.ContributorInsightsMode)) + } + + if v.IndexName != nil { + ok := object.Key("IndexName") + ok.String(*v.IndexName) + } + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentUpdateGlobalTableInput(v *UpdateGlobalTableInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GlobalTableName != nil { + ok := object.Key("GlobalTableName") + ok.String(*v.GlobalTableName) + } + + if v.ReplicaUpdates != nil { + ok := object.Key("ReplicaUpdates") + if err := awsAwsjson10_serializeDocumentReplicaUpdateList(v.ReplicaUpdates, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentUpdateGlobalTableSettingsInput(v *UpdateGlobalTableSettingsInput, value 
smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.GlobalTableBillingMode) > 0 { + ok := object.Key("GlobalTableBillingMode") + ok.String(string(v.GlobalTableBillingMode)) + } + + if v.GlobalTableGlobalSecondaryIndexSettingsUpdate != nil { + ok := object.Key("GlobalTableGlobalSecondaryIndexSettingsUpdate") + if err := awsAwsjson10_serializeDocumentGlobalTableGlobalSecondaryIndexSettingsUpdateList(v.GlobalTableGlobalSecondaryIndexSettingsUpdate, ok); err != nil { + return err + } + } + + if v.GlobalTableName != nil { + ok := object.Key("GlobalTableName") + ok.String(*v.GlobalTableName) + } + + if v.GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate != nil { + ok := object.Key("GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate") + if err := awsAwsjson10_serializeDocumentAutoScalingSettingsUpdate(v.GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate, ok); err != nil { + return err + } + } + + if v.GlobalTableProvisionedWriteCapacityUnits != nil { + ok := object.Key("GlobalTableProvisionedWriteCapacityUnits") + ok.Long(*v.GlobalTableProvisionedWriteCapacityUnits) + } + + if v.ReplicaSettingsUpdate != nil { + ok := object.Key("ReplicaSettingsUpdate") + if err := awsAwsjson10_serializeDocumentReplicaSettingsUpdateList(v.ReplicaSettingsUpdate, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentUpdateItemInput(v *UpdateItemInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AttributeUpdates != nil { + ok := object.Key("AttributeUpdates") + if err := awsAwsjson10_serializeDocumentAttributeUpdates(v.AttributeUpdates, ok); err != nil { + return err + } + } + + if len(v.ConditionalOperator) > 0 { + ok := object.Key("ConditionalOperator") + ok.String(string(v.ConditionalOperator)) + } + + if v.ConditionExpression != nil { + ok := object.Key("ConditionExpression") + ok.String(*v.ConditionExpression) + } + + if v.Expected != nil { + ok := object.Key("Expected") + if err := awsAwsjson10_serializeDocumentExpectedAttributeMap(v.Expected, ok); err != nil { + return err + } + } + + if v.ExpressionAttributeNames != nil { + ok := object.Key("ExpressionAttributeNames") + if err := awsAwsjson10_serializeDocumentExpressionAttributeNameMap(v.ExpressionAttributeNames, ok); err != nil { + return err + } + } + + if v.ExpressionAttributeValues != nil { + ok := object.Key("ExpressionAttributeValues") + if err := awsAwsjson10_serializeDocumentExpressionAttributeValueMap(v.ExpressionAttributeValues, ok); err != nil { + return err + } + } + + if v.Key != nil { + ok := object.Key("Key") + if err := awsAwsjson10_serializeDocumentKey(v.Key, ok); err != nil { + return err + } + } + + if len(v.ReturnConsumedCapacity) > 0 { + ok := object.Key("ReturnConsumedCapacity") + ok.String(string(v.ReturnConsumedCapacity)) + } + + if len(v.ReturnItemCollectionMetrics) > 0 { + ok := object.Key("ReturnItemCollectionMetrics") + ok.String(string(v.ReturnItemCollectionMetrics)) + } + + if len(v.ReturnValues) > 0 { + ok := object.Key("ReturnValues") + ok.String(string(v.ReturnValues)) + } + + if len(v.ReturnValuesOnConditionCheckFailure) > 0 { + ok := object.Key("ReturnValuesOnConditionCheckFailure") + ok.String(string(v.ReturnValuesOnConditionCheckFailure)) + } + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + if v.UpdateExpression != nil { + ok := object.Key("UpdateExpression") + ok.String(*v.UpdateExpression) + } + + return nil +} + +func 
awsAwsjson10_serializeOpDocumentUpdateKinesisStreamingDestinationInput(v *UpdateKinesisStreamingDestinationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.StreamArn != nil { + ok := object.Key("StreamArn") + ok.String(*v.StreamArn) + } + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + if v.UpdateKinesisStreamingConfiguration != nil { + ok := object.Key("UpdateKinesisStreamingConfiguration") + if err := awsAwsjson10_serializeDocumentUpdateKinesisStreamingConfiguration(v.UpdateKinesisStreamingConfiguration, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentUpdateTableInput(v *UpdateTableInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AttributeDefinitions != nil { + ok := object.Key("AttributeDefinitions") + if err := awsAwsjson10_serializeDocumentAttributeDefinitions(v.AttributeDefinitions, ok); err != nil { + return err + } + } + + if len(v.BillingMode) > 0 { + ok := object.Key("BillingMode") + ok.String(string(v.BillingMode)) + } + + if v.DeletionProtectionEnabled != nil { + ok := object.Key("DeletionProtectionEnabled") + ok.Boolean(*v.DeletionProtectionEnabled) + } + + if v.GlobalSecondaryIndexUpdates != nil { + ok := object.Key("GlobalSecondaryIndexUpdates") + if err := awsAwsjson10_serializeDocumentGlobalSecondaryIndexUpdateList(v.GlobalSecondaryIndexUpdates, ok); err != nil { + return err + } + } + + if v.GlobalTableWitnessUpdates != nil { + ok := object.Key("GlobalTableWitnessUpdates") + if err := awsAwsjson10_serializeDocumentGlobalTableWitnessGroupUpdateList(v.GlobalTableWitnessUpdates, ok); err != nil { + return err + } + } + + if len(v.MultiRegionConsistency) > 0 { + ok := object.Key("MultiRegionConsistency") + ok.String(string(v.MultiRegionConsistency)) + } + + if v.OnDemandThroughput != nil { + ok := object.Key("OnDemandThroughput") + if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughput, ok); err != nil { + return err + } + } + + if v.ProvisionedThroughput != nil { + ok := object.Key("ProvisionedThroughput") + if err := awsAwsjson10_serializeDocumentProvisionedThroughput(v.ProvisionedThroughput, ok); err != nil { + return err + } + } + + if v.ReplicaUpdates != nil { + ok := object.Key("ReplicaUpdates") + if err := awsAwsjson10_serializeDocumentReplicationGroupUpdateList(v.ReplicaUpdates, ok); err != nil { + return err + } + } + + if v.SSESpecification != nil { + ok := object.Key("SSESpecification") + if err := awsAwsjson10_serializeDocumentSSESpecification(v.SSESpecification, ok); err != nil { + return err + } + } + + if v.StreamSpecification != nil { + ok := object.Key("StreamSpecification") + if err := awsAwsjson10_serializeDocumentStreamSpecification(v.StreamSpecification, ok); err != nil { + return err + } + } + + if len(v.TableClass) > 0 { + ok := object.Key("TableClass") + ok.String(string(v.TableClass)) + } + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + if v.WarmThroughput != nil { + ok := object.Key("WarmThroughput") + if err := awsAwsjson10_serializeDocumentWarmThroughput(v.WarmThroughput, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentUpdateTableReplicaAutoScalingInput(v *UpdateTableReplicaAutoScalingInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GlobalSecondaryIndexUpdates != nil { + ok := 
object.Key("GlobalSecondaryIndexUpdates") + if err := awsAwsjson10_serializeDocumentGlobalSecondaryIndexAutoScalingUpdateList(v.GlobalSecondaryIndexUpdates, ok); err != nil { + return err + } + } + + if v.ProvisionedWriteCapacityAutoScalingUpdate != nil { + ok := object.Key("ProvisionedWriteCapacityAutoScalingUpdate") + if err := awsAwsjson10_serializeDocumentAutoScalingSettingsUpdate(v.ProvisionedWriteCapacityAutoScalingUpdate, ok); err != nil { + return err + } + } + + if v.ReplicaUpdates != nil { + ok := object.Key("ReplicaUpdates") + if err := awsAwsjson10_serializeDocumentReplicaAutoScalingUpdateList(v.ReplicaUpdates, ok); err != nil { + return err + } + } + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + return nil +} + +func awsAwsjson10_serializeOpDocumentUpdateTimeToLiveInput(v *UpdateTimeToLiveInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + if v.TimeToLiveSpecification != nil { + ok := object.Key("TimeToLiveSpecification") + if err := awsAwsjson10_serializeDocumentTimeToLiveSpecification(v.TimeToLiveSpecification, ok); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/enums.go new file mode 100644 index 0000000000..47d43d57e5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/enums.go @@ -0,0 +1,947 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +type ApproximateCreationDateTimePrecision string + +// Enum values for ApproximateCreationDateTimePrecision +const ( + ApproximateCreationDateTimePrecisionMillisecond ApproximateCreationDateTimePrecision = "MILLISECOND" + ApproximateCreationDateTimePrecisionMicrosecond ApproximateCreationDateTimePrecision = "MICROSECOND" +) + +// Values returns all known values for ApproximateCreationDateTimePrecision. Note +// that this can be expanded in the future, and so it is only as up to date as the +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ApproximateCreationDateTimePrecision) Values() []ApproximateCreationDateTimePrecision { + return []ApproximateCreationDateTimePrecision{ + "MILLISECOND", + "MICROSECOND", + } +} + +type AttributeAction string + +// Enum values for AttributeAction +const ( + AttributeActionAdd AttributeAction = "ADD" + AttributeActionPut AttributeAction = "PUT" + AttributeActionDelete AttributeAction = "DELETE" +) + +// Values returns all known values for AttributeAction. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (AttributeAction) Values() []AttributeAction { + return []AttributeAction{ + "ADD", + "PUT", + "DELETE", + } +} + +type BackupStatus string + +// Enum values for BackupStatus +const ( + BackupStatusCreating BackupStatus = "CREATING" + BackupStatusDeleted BackupStatus = "DELETED" + BackupStatusAvailable BackupStatus = "AVAILABLE" +) + +// Values returns all known values for BackupStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (BackupStatus) Values() []BackupStatus { + return []BackupStatus{ + "CREATING", + "DELETED", + "AVAILABLE", + } +} + +type BackupType string + +// Enum values for BackupType +const ( + BackupTypeUser BackupType = "USER" + BackupTypeSystem BackupType = "SYSTEM" + BackupTypeAwsBackup BackupType = "AWS_BACKUP" +) + +// Values returns all known values for BackupType. Note that this can be expanded +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (BackupType) Values() []BackupType { + return []BackupType{ + "USER", + "SYSTEM", + "AWS_BACKUP", + } +} + +type BackupTypeFilter string + +// Enum values for BackupTypeFilter +const ( + BackupTypeFilterUser BackupTypeFilter = "USER" + BackupTypeFilterSystem BackupTypeFilter = "SYSTEM" + BackupTypeFilterAwsBackup BackupTypeFilter = "AWS_BACKUP" + BackupTypeFilterAll BackupTypeFilter = "ALL" +) + +// Values returns all known values for BackupTypeFilter. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (BackupTypeFilter) Values() []BackupTypeFilter { + return []BackupTypeFilter{ + "USER", + "SYSTEM", + "AWS_BACKUP", + "ALL", + } +} + +type BatchStatementErrorCodeEnum string + +// Enum values for BatchStatementErrorCodeEnum +const ( + BatchStatementErrorCodeEnumConditionalCheckFailed BatchStatementErrorCodeEnum = "ConditionalCheckFailed" + BatchStatementErrorCodeEnumItemCollectionSizeLimitExceeded BatchStatementErrorCodeEnum = "ItemCollectionSizeLimitExceeded" + BatchStatementErrorCodeEnumRequestLimitExceeded BatchStatementErrorCodeEnum = "RequestLimitExceeded" + BatchStatementErrorCodeEnumValidationError BatchStatementErrorCodeEnum = "ValidationError" + BatchStatementErrorCodeEnumProvisionedThroughputExceeded BatchStatementErrorCodeEnum = "ProvisionedThroughputExceeded" + BatchStatementErrorCodeEnumTransactionConflict BatchStatementErrorCodeEnum = "TransactionConflict" + BatchStatementErrorCodeEnumThrottlingError BatchStatementErrorCodeEnum = "ThrottlingError" + BatchStatementErrorCodeEnumInternalServerError BatchStatementErrorCodeEnum = "InternalServerError" + BatchStatementErrorCodeEnumResourceNotFound BatchStatementErrorCodeEnum = "ResourceNotFound" + BatchStatementErrorCodeEnumAccessDenied BatchStatementErrorCodeEnum = "AccessDenied" + BatchStatementErrorCodeEnumDuplicateItem BatchStatementErrorCodeEnum = "DuplicateItem" +) + +// Values returns all known values for BatchStatementErrorCodeEnum. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (BatchStatementErrorCodeEnum) Values() []BatchStatementErrorCodeEnum { + return []BatchStatementErrorCodeEnum{ + "ConditionalCheckFailed", + "ItemCollectionSizeLimitExceeded", + "RequestLimitExceeded", + "ValidationError", + "ProvisionedThroughputExceeded", + "TransactionConflict", + "ThrottlingError", + "InternalServerError", + "ResourceNotFound", + "AccessDenied", + "DuplicateItem", + } +} + +type BillingMode string + +// Enum values for BillingMode +const ( + BillingModeProvisioned BillingMode = "PROVISIONED" + BillingModePayPerRequest BillingMode = "PAY_PER_REQUEST" +) + +// Values returns all known values for BillingMode. Note that this can be expanded +// in the future, and so it is only as up to date as the client. 
+// +// The ordering of this slice is not guaranteed to be stable across updates. +func (BillingMode) Values() []BillingMode { + return []BillingMode{ + "PROVISIONED", + "PAY_PER_REQUEST", + } +} + +type ComparisonOperator string + +// Enum values for ComparisonOperator +const ( + ComparisonOperatorEq ComparisonOperator = "EQ" + ComparisonOperatorNe ComparisonOperator = "NE" + ComparisonOperatorIn ComparisonOperator = "IN" + ComparisonOperatorLe ComparisonOperator = "LE" + ComparisonOperatorLt ComparisonOperator = "LT" + ComparisonOperatorGe ComparisonOperator = "GE" + ComparisonOperatorGt ComparisonOperator = "GT" + ComparisonOperatorBetween ComparisonOperator = "BETWEEN" + ComparisonOperatorNotNull ComparisonOperator = "NOT_NULL" + ComparisonOperatorNull ComparisonOperator = "NULL" + ComparisonOperatorContains ComparisonOperator = "CONTAINS" + ComparisonOperatorNotContains ComparisonOperator = "NOT_CONTAINS" + ComparisonOperatorBeginsWith ComparisonOperator = "BEGINS_WITH" +) + +// Values returns all known values for ComparisonOperator. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ComparisonOperator) Values() []ComparisonOperator { + return []ComparisonOperator{ + "EQ", + "NE", + "IN", + "LE", + "LT", + "GE", + "GT", + "BETWEEN", + "NOT_NULL", + "NULL", + "CONTAINS", + "NOT_CONTAINS", + "BEGINS_WITH", + } +} + +type ConditionalOperator string + +// Enum values for ConditionalOperator +const ( + ConditionalOperatorAnd ConditionalOperator = "AND" + ConditionalOperatorOr ConditionalOperator = "OR" +) + +// Values returns all known values for ConditionalOperator. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ConditionalOperator) Values() []ConditionalOperator { + return []ConditionalOperator{ + "AND", + "OR", + } +} + +type ContinuousBackupsStatus string + +// Enum values for ContinuousBackupsStatus +const ( + ContinuousBackupsStatusEnabled ContinuousBackupsStatus = "ENABLED" + ContinuousBackupsStatusDisabled ContinuousBackupsStatus = "DISABLED" +) + +// Values returns all known values for ContinuousBackupsStatus. Note that this can +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ContinuousBackupsStatus) Values() []ContinuousBackupsStatus { + return []ContinuousBackupsStatus{ + "ENABLED", + "DISABLED", + } +} + +type ContributorInsightsAction string + +// Enum values for ContributorInsightsAction +const ( + ContributorInsightsActionEnable ContributorInsightsAction = "ENABLE" + ContributorInsightsActionDisable ContributorInsightsAction = "DISABLE" +) + +// Values returns all known values for ContributorInsightsAction. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (ContributorInsightsAction) Values() []ContributorInsightsAction { + return []ContributorInsightsAction{ + "ENABLE", + "DISABLE", + } +} + +type ContributorInsightsMode string + +// Enum values for ContributorInsightsMode +const ( + ContributorInsightsModeAccessedAndThrottledKeys ContributorInsightsMode = "ACCESSED_AND_THROTTLED_KEYS" + ContributorInsightsModeThrottledKeys ContributorInsightsMode = "THROTTLED_KEYS" +) + +// Values returns all known values for ContributorInsightsMode. Note that this can +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ContributorInsightsMode) Values() []ContributorInsightsMode { + return []ContributorInsightsMode{ + "ACCESSED_AND_THROTTLED_KEYS", + "THROTTLED_KEYS", + } +} + +type ContributorInsightsStatus string + +// Enum values for ContributorInsightsStatus +const ( + ContributorInsightsStatusEnabling ContributorInsightsStatus = "ENABLING" + ContributorInsightsStatusEnabled ContributorInsightsStatus = "ENABLED" + ContributorInsightsStatusDisabling ContributorInsightsStatus = "DISABLING" + ContributorInsightsStatusDisabled ContributorInsightsStatus = "DISABLED" + ContributorInsightsStatusFailed ContributorInsightsStatus = "FAILED" +) + +// Values returns all known values for ContributorInsightsStatus. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ContributorInsightsStatus) Values() []ContributorInsightsStatus { + return []ContributorInsightsStatus{ + "ENABLING", + "ENABLED", + "DISABLING", + "DISABLED", + "FAILED", + } +} + +type DestinationStatus string + +// Enum values for DestinationStatus +const ( + DestinationStatusEnabling DestinationStatus = "ENABLING" + DestinationStatusActive DestinationStatus = "ACTIVE" + DestinationStatusDisabling DestinationStatus = "DISABLING" + DestinationStatusDisabled DestinationStatus = "DISABLED" + DestinationStatusEnableFailed DestinationStatus = "ENABLE_FAILED" + DestinationStatusUpdating DestinationStatus = "UPDATING" +) + +// Values returns all known values for DestinationStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (DestinationStatus) Values() []DestinationStatus { + return []DestinationStatus{ + "ENABLING", + "ACTIVE", + "DISABLING", + "DISABLED", + "ENABLE_FAILED", + "UPDATING", + } +} + +type ExportFormat string + +// Enum values for ExportFormat +const ( + ExportFormatDynamodbJson ExportFormat = "DYNAMODB_JSON" + ExportFormatIon ExportFormat = "ION" +) + +// Values returns all known values for ExportFormat. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ExportFormat) Values() []ExportFormat { + return []ExportFormat{ + "DYNAMODB_JSON", + "ION", + } +} + +type ExportStatus string + +// Enum values for ExportStatus +const ( + ExportStatusInProgress ExportStatus = "IN_PROGRESS" + ExportStatusCompleted ExportStatus = "COMPLETED" + ExportStatusFailed ExportStatus = "FAILED" +) + +// Values returns all known values for ExportStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. 
+// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ExportStatus) Values() []ExportStatus { + return []ExportStatus{ + "IN_PROGRESS", + "COMPLETED", + "FAILED", + } +} + +type ExportType string + +// Enum values for ExportType +const ( + ExportTypeFullExport ExportType = "FULL_EXPORT" + ExportTypeIncrementalExport ExportType = "INCREMENTAL_EXPORT" +) + +// Values returns all known values for ExportType. Note that this can be expanded +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ExportType) Values() []ExportType { + return []ExportType{ + "FULL_EXPORT", + "INCREMENTAL_EXPORT", + } +} + +type ExportViewType string + +// Enum values for ExportViewType +const ( + ExportViewTypeNewImage ExportViewType = "NEW_IMAGE" + ExportViewTypeNewAndOldImages ExportViewType = "NEW_AND_OLD_IMAGES" +) + +// Values returns all known values for ExportViewType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ExportViewType) Values() []ExportViewType { + return []ExportViewType{ + "NEW_IMAGE", + "NEW_AND_OLD_IMAGES", + } +} + +type GlobalTableStatus string + +// Enum values for GlobalTableStatus +const ( + GlobalTableStatusCreating GlobalTableStatus = "CREATING" + GlobalTableStatusActive GlobalTableStatus = "ACTIVE" + GlobalTableStatusDeleting GlobalTableStatus = "DELETING" + GlobalTableStatusUpdating GlobalTableStatus = "UPDATING" +) + +// Values returns all known values for GlobalTableStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (GlobalTableStatus) Values() []GlobalTableStatus { + return []GlobalTableStatus{ + "CREATING", + "ACTIVE", + "DELETING", + "UPDATING", + } +} + +type ImportStatus string + +// Enum values for ImportStatus +const ( + ImportStatusInProgress ImportStatus = "IN_PROGRESS" + ImportStatusCompleted ImportStatus = "COMPLETED" + ImportStatusCancelling ImportStatus = "CANCELLING" + ImportStatusCancelled ImportStatus = "CANCELLED" + ImportStatusFailed ImportStatus = "FAILED" +) + +// Values returns all known values for ImportStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ImportStatus) Values() []ImportStatus { + return []ImportStatus{ + "IN_PROGRESS", + "COMPLETED", + "CANCELLING", + "CANCELLED", + "FAILED", + } +} + +type IndexStatus string + +// Enum values for IndexStatus +const ( + IndexStatusCreating IndexStatus = "CREATING" + IndexStatusUpdating IndexStatus = "UPDATING" + IndexStatusDeleting IndexStatus = "DELETING" + IndexStatusActive IndexStatus = "ACTIVE" +) + +// Values returns all known values for IndexStatus. Note that this can be expanded +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (IndexStatus) Values() []IndexStatus { + return []IndexStatus{ + "CREATING", + "UPDATING", + "DELETING", + "ACTIVE", + } +} + +type InputCompressionType string + +// Enum values for InputCompressionType +const ( + InputCompressionTypeGzip InputCompressionType = "GZIP" + InputCompressionTypeZstd InputCompressionType = "ZSTD" + InputCompressionTypeNone InputCompressionType = "NONE" +) + +// Values returns all known values for InputCompressionType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (InputCompressionType) Values() []InputCompressionType { + return []InputCompressionType{ + "GZIP", + "ZSTD", + "NONE", + } +} + +type InputFormat string + +// Enum values for InputFormat +const ( + InputFormatDynamodbJson InputFormat = "DYNAMODB_JSON" + InputFormatIon InputFormat = "ION" + InputFormatCsv InputFormat = "CSV" +) + +// Values returns all known values for InputFormat. Note that this can be expanded +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (InputFormat) Values() []InputFormat { + return []InputFormat{ + "DYNAMODB_JSON", + "ION", + "CSV", + } +} + +type KeyType string + +// Enum values for KeyType +const ( + KeyTypeHash KeyType = "HASH" + KeyTypeRange KeyType = "RANGE" +) + +// Values returns all known values for KeyType. Note that this can be expanded in +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (KeyType) Values() []KeyType { + return []KeyType{ + "HASH", + "RANGE", + } +} + +type MultiRegionConsistency string + +// Enum values for MultiRegionConsistency +const ( + MultiRegionConsistencyEventual MultiRegionConsistency = "EVENTUAL" + MultiRegionConsistencyStrong MultiRegionConsistency = "STRONG" +) + +// Values returns all known values for MultiRegionConsistency. Note that this can +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (MultiRegionConsistency) Values() []MultiRegionConsistency { + return []MultiRegionConsistency{ + "EVENTUAL", + "STRONG", + } +} + +type PointInTimeRecoveryStatus string + +// Enum values for PointInTimeRecoveryStatus +const ( + PointInTimeRecoveryStatusEnabled PointInTimeRecoveryStatus = "ENABLED" + PointInTimeRecoveryStatusDisabled PointInTimeRecoveryStatus = "DISABLED" +) + +// Values returns all known values for PointInTimeRecoveryStatus. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (PointInTimeRecoveryStatus) Values() []PointInTimeRecoveryStatus { + return []PointInTimeRecoveryStatus{ + "ENABLED", + "DISABLED", + } +} + +type ProjectionType string + +// Enum values for ProjectionType +const ( + ProjectionTypeAll ProjectionType = "ALL" + ProjectionTypeKeysOnly ProjectionType = "KEYS_ONLY" + ProjectionTypeInclude ProjectionType = "INCLUDE" +) + +// Values returns all known values for ProjectionType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
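+//
+// A minimal sketch (hypothetical attribute names, not part of the generated
+// API) pairing INCLUDE with the non-key attributes it projects into an index:
+//
+//	proj := Projection{
+//		ProjectionType:   ProjectionTypeInclude,
+//		NonKeyAttributes: []string{"Email", "CreatedAt"},
+//	}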
+func (ProjectionType) Values() []ProjectionType { + return []ProjectionType{ + "ALL", + "KEYS_ONLY", + "INCLUDE", + } +} + +type ReplicaStatus string + +// Enum values for ReplicaStatus +const ( + ReplicaStatusCreating ReplicaStatus = "CREATING" + ReplicaStatusCreationFailed ReplicaStatus = "CREATION_FAILED" + ReplicaStatusUpdating ReplicaStatus = "UPDATING" + ReplicaStatusDeleting ReplicaStatus = "DELETING" + ReplicaStatusActive ReplicaStatus = "ACTIVE" + ReplicaStatusRegionDisabled ReplicaStatus = "REGION_DISABLED" + ReplicaStatusInaccessibleEncryptionCredentials ReplicaStatus = "INACCESSIBLE_ENCRYPTION_CREDENTIALS" + ReplicaStatusArchiving ReplicaStatus = "ARCHIVING" + ReplicaStatusArchived ReplicaStatus = "ARCHIVED" + ReplicaStatusReplicationNotAuthorized ReplicaStatus = "REPLICATION_NOT_AUTHORIZED" +) + +// Values returns all known values for ReplicaStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ReplicaStatus) Values() []ReplicaStatus { + return []ReplicaStatus{ + "CREATING", + "CREATION_FAILED", + "UPDATING", + "DELETING", + "ACTIVE", + "REGION_DISABLED", + "INACCESSIBLE_ENCRYPTION_CREDENTIALS", + "ARCHIVING", + "ARCHIVED", + "REPLICATION_NOT_AUTHORIZED", + } +} + +type ReturnConsumedCapacity string + +// Enum values for ReturnConsumedCapacity +const ( + ReturnConsumedCapacityIndexes ReturnConsumedCapacity = "INDEXES" + ReturnConsumedCapacityTotal ReturnConsumedCapacity = "TOTAL" + ReturnConsumedCapacityNone ReturnConsumedCapacity = "NONE" +) + +// Values returns all known values for ReturnConsumedCapacity. Note that this can +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ReturnConsumedCapacity) Values() []ReturnConsumedCapacity { + return []ReturnConsumedCapacity{ + "INDEXES", + "TOTAL", + "NONE", + } +} + +type ReturnItemCollectionMetrics string + +// Enum values for ReturnItemCollectionMetrics +const ( + ReturnItemCollectionMetricsSize ReturnItemCollectionMetrics = "SIZE" + ReturnItemCollectionMetricsNone ReturnItemCollectionMetrics = "NONE" +) + +// Values returns all known values for ReturnItemCollectionMetrics. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ReturnItemCollectionMetrics) Values() []ReturnItemCollectionMetrics { + return []ReturnItemCollectionMetrics{ + "SIZE", + "NONE", + } +} + +type ReturnValue string + +// Enum values for ReturnValue +const ( + ReturnValueNone ReturnValue = "NONE" + ReturnValueAllOld ReturnValue = "ALL_OLD" + ReturnValueUpdatedOld ReturnValue = "UPDATED_OLD" + ReturnValueAllNew ReturnValue = "ALL_NEW" + ReturnValueUpdatedNew ReturnValue = "UPDATED_NEW" +) + +// Values returns all known values for ReturnValue. Note that this can be expanded +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (ReturnValue) Values() []ReturnValue { + return []ReturnValue{ + "NONE", + "ALL_OLD", + "UPDATED_OLD", + "ALL_NEW", + "UPDATED_NEW", + } +} + +type ReturnValuesOnConditionCheckFailure string + +// Enum values for ReturnValuesOnConditionCheckFailure +const ( + ReturnValuesOnConditionCheckFailureAllOld ReturnValuesOnConditionCheckFailure = "ALL_OLD" + ReturnValuesOnConditionCheckFailureNone ReturnValuesOnConditionCheckFailure = "NONE" +) + +// Values returns all known values for ReturnValuesOnConditionCheckFailure. Note +// that this can be expanded in the future, and so it is only as up to date as the +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ReturnValuesOnConditionCheckFailure) Values() []ReturnValuesOnConditionCheckFailure { + return []ReturnValuesOnConditionCheckFailure{ + "ALL_OLD", + "NONE", + } +} + +type S3SseAlgorithm string + +// Enum values for S3SseAlgorithm +const ( + S3SseAlgorithmAes256 S3SseAlgorithm = "AES256" + S3SseAlgorithmKms S3SseAlgorithm = "KMS" +) + +// Values returns all known values for S3SseAlgorithm. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (S3SseAlgorithm) Values() []S3SseAlgorithm { + return []S3SseAlgorithm{ + "AES256", + "KMS", + } +} + +type ScalarAttributeType string + +// Enum values for ScalarAttributeType +const ( + ScalarAttributeTypeS ScalarAttributeType = "S" + ScalarAttributeTypeN ScalarAttributeType = "N" + ScalarAttributeTypeB ScalarAttributeType = "B" +) + +// Values returns all known values for ScalarAttributeType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ScalarAttributeType) Values() []ScalarAttributeType { + return []ScalarAttributeType{ + "S", + "N", + "B", + } +} + +type Select string + +// Enum values for Select +const ( + SelectAllAttributes Select = "ALL_ATTRIBUTES" + SelectAllProjectedAttributes Select = "ALL_PROJECTED_ATTRIBUTES" + SelectSpecificAttributes Select = "SPECIFIC_ATTRIBUTES" + SelectCount Select = "COUNT" +) + +// Values returns all known values for Select. Note that this can be expanded in +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (Select) Values() []Select { + return []Select{ + "ALL_ATTRIBUTES", + "ALL_PROJECTED_ATTRIBUTES", + "SPECIFIC_ATTRIBUTES", + "COUNT", + } +} + +type SSEStatus string + +// Enum values for SSEStatus +const ( + SSEStatusEnabling SSEStatus = "ENABLING" + SSEStatusEnabled SSEStatus = "ENABLED" + SSEStatusDisabling SSEStatus = "DISABLING" + SSEStatusDisabled SSEStatus = "DISABLED" + SSEStatusUpdating SSEStatus = "UPDATING" +) + +// Values returns all known values for SSEStatus. Note that this can be expanded +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (SSEStatus) Values() []SSEStatus { + return []SSEStatus{ + "ENABLING", + "ENABLED", + "DISABLING", + "DISABLED", + "UPDATING", + } +} + +type SSEType string + +// Enum values for SSEType +const ( + SSETypeAes256 SSEType = "AES256" + SSETypeKms SSEType = "KMS" +) + +// Values returns all known values for SSEType. 
Note that this can be expanded in +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (SSEType) Values() []SSEType { + return []SSEType{ + "AES256", + "KMS", + } +} + +type StreamViewType string + +// Enum values for StreamViewType +const ( + StreamViewTypeNewImage StreamViewType = "NEW_IMAGE" + StreamViewTypeOldImage StreamViewType = "OLD_IMAGE" + StreamViewTypeNewAndOldImages StreamViewType = "NEW_AND_OLD_IMAGES" + StreamViewTypeKeysOnly StreamViewType = "KEYS_ONLY" +) + +// Values returns all known values for StreamViewType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (StreamViewType) Values() []StreamViewType { + return []StreamViewType{ + "NEW_IMAGE", + "OLD_IMAGE", + "NEW_AND_OLD_IMAGES", + "KEYS_ONLY", + } +} + +type TableClass string + +// Enum values for TableClass +const ( + TableClassStandard TableClass = "STANDARD" + TableClassStandardInfrequentAccess TableClass = "STANDARD_INFREQUENT_ACCESS" +) + +// Values returns all known values for TableClass. Note that this can be expanded +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (TableClass) Values() []TableClass { + return []TableClass{ + "STANDARD", + "STANDARD_INFREQUENT_ACCESS", + } +} + +type TableStatus string + +// Enum values for TableStatus +const ( + TableStatusCreating TableStatus = "CREATING" + TableStatusUpdating TableStatus = "UPDATING" + TableStatusDeleting TableStatus = "DELETING" + TableStatusActive TableStatus = "ACTIVE" + TableStatusInaccessibleEncryptionCredentials TableStatus = "INACCESSIBLE_ENCRYPTION_CREDENTIALS" + TableStatusArchiving TableStatus = "ARCHIVING" + TableStatusArchived TableStatus = "ARCHIVED" + TableStatusReplicationNotAuthorized TableStatus = "REPLICATION_NOT_AUTHORIZED" +) + +// Values returns all known values for TableStatus. Note that this can be expanded +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (TableStatus) Values() []TableStatus { + return []TableStatus{ + "CREATING", + "UPDATING", + "DELETING", + "ACTIVE", + "INACCESSIBLE_ENCRYPTION_CREDENTIALS", + "ARCHIVING", + "ARCHIVED", + "REPLICATION_NOT_AUTHORIZED", + } +} + +type TimeToLiveStatus string + +// Enum values for TimeToLiveStatus +const ( + TimeToLiveStatusEnabling TimeToLiveStatus = "ENABLING" + TimeToLiveStatusDisabling TimeToLiveStatus = "DISABLING" + TimeToLiveStatusEnabled TimeToLiveStatus = "ENABLED" + TimeToLiveStatusDisabled TimeToLiveStatus = "DISABLED" +) + +// Values returns all known values for TimeToLiveStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (TimeToLiveStatus) Values() []TimeToLiveStatus { + return []TimeToLiveStatus{ + "ENABLING", + "DISABLING", + "ENABLED", + "DISABLED", + } +} + +type WitnessStatus string + +// Enum values for WitnessStatus +const ( + WitnessStatusCreating WitnessStatus = "CREATING" + WitnessStatusDeleting WitnessStatus = "DELETING" + WitnessStatusActive WitnessStatus = "ACTIVE" +) + +// Values returns all known values for WitnessStatus. 
Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (WitnessStatus) Values() []WitnessStatus { + return []WitnessStatus{ + "CREATING", + "DELETING", + "ACTIVE", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/errors.go new file mode 100644 index 0000000000..45bf62b57d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/errors.go @@ -0,0 +1,1186 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// There is another ongoing conflicting backup control plane operation on the +// table. The backup is either being created, deleted or restored to a table. +type BackupInUseException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *BackupInUseException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *BackupInUseException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *BackupInUseException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "BackupInUseException" + } + return *e.ErrorCodeOverride +} +func (e *BackupInUseException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Backup not found for the given BackupARN. +type BackupNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *BackupNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *BackupNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *BackupNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "BackupNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *BackupNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// A condition specified in the operation failed to be evaluated. +type ConditionalCheckFailedException struct { + Message *string + + ErrorCodeOverride *string + + Item map[string]AttributeValue + + noSmithyDocumentSerde +} + +func (e *ConditionalCheckFailedException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ConditionalCheckFailedException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ConditionalCheckFailedException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ConditionalCheckFailedException" + } + return *e.ErrorCodeOverride +} +func (e *ConditionalCheckFailedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Backups have not yet been enabled for this table. 
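+//
+// A minimal handling sketch (hypothetical logging, not part of the generated
+// API); every error type in this package can be matched the same way with the
+// standard library's errors.As:
+//
+//	var cbu *ContinuousBackupsUnavailableException
+//	if errors.As(err, &cbu) {
+//		log.Printf("enable continuous backups first: %s", cbu.ErrorMessage())
+//	}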
+type ContinuousBackupsUnavailableException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *ContinuousBackupsUnavailableException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ContinuousBackupsUnavailableException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *ContinuousBackupsUnavailableException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "ContinuousBackupsUnavailableException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *ContinuousBackupsUnavailableException) ErrorFault() smithy.ErrorFault {
+	return smithy.FaultClient
+}
+
+// There was an attempt to insert an item with the same primary key as an item
+// that already exists in the DynamoDB table.
+type DuplicateItemException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *DuplicateItemException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *DuplicateItemException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *DuplicateItemException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "DuplicateItemException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *DuplicateItemException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// There was a conflict when writing to the specified S3 bucket.
+type ExportConflictException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *ExportConflictException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ExportConflictException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *ExportConflictException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "ExportConflictException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *ExportConflictException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The specified export was not found.
+type ExportNotFoundException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *ExportNotFoundException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ExportNotFoundException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *ExportNotFoundException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "ExportNotFoundException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *ExportNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The specified global table already exists.
+type GlobalTableAlreadyExistsException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *GlobalTableAlreadyExistsException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *GlobalTableAlreadyExistsException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *GlobalTableAlreadyExistsException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "GlobalTableAlreadyExistsException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *GlobalTableAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The specified global table does not exist.
+type GlobalTableNotFoundException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *GlobalTableNotFoundException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *GlobalTableNotFoundException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *GlobalTableNotFoundException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "GlobalTableNotFoundException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *GlobalTableNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// DynamoDB rejected the request because you retried a request with a different
+// payload but with an idempotent token that was already used.
+type IdempotentParameterMismatchException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *IdempotentParameterMismatchException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *IdempotentParameterMismatchException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *IdempotentParameterMismatchException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "IdempotentParameterMismatchException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *IdempotentParameterMismatchException) ErrorFault() smithy.ErrorFault {
+	return smithy.FaultClient
+}
+
+// There was a conflict when importing from the specified S3 source. This can
+// occur when the current import conflicts with a previous import request that had
+// the same client token.
+type ImportConflictException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *ImportConflictException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ImportConflictException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *ImportConflictException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "ImportConflictException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *ImportConflictException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The specified import was not found.
+type ImportNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ImportNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ImportNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ImportNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ImportNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *ImportNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The operation tried to access a nonexistent index. +type IndexNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *IndexNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *IndexNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *IndexNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "IndexNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *IndexNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// An error occurred on the server side. +type InternalServerError struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InternalServerError) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InternalServerError) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InternalServerError) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InternalServerError" + } + return *e.ErrorCodeOverride +} +func (e *InternalServerError) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } + +type InvalidEndpointException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidEndpointException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidEndpointException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidEndpointException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidEndpointException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidEndpointException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified ExportTime is outside of the point in time recovery window. +type InvalidExportTimeException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidExportTimeException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidExportTimeException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidExportTimeException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidExportTimeException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidExportTimeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// An invalid restore time was specified. RestoreDateTime must be between +// EarliestRestorableDateTime and LatestRestorableDateTime. 
+type InvalidRestoreTimeException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidRestoreTimeException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidRestoreTimeException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidRestoreTimeException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidRestoreTimeException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidRestoreTimeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// An item collection is too large. This exception is only returned for tables +// that have one or more local secondary indexes. +type ItemCollectionSizeLimitExceededException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ItemCollectionSizeLimitExceededException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ItemCollectionSizeLimitExceededException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ItemCollectionSizeLimitExceededException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ItemCollectionSizeLimitExceededException" + } + return *e.ErrorCodeOverride +} +func (e *ItemCollectionSizeLimitExceededException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// There is no limit to the number of daily on-demand backups that can be taken. +// +// For most purposes, up to 500 simultaneous table operations are allowed per +// account. These operations include CreateTable , UpdateTable , DeleteTable , +// UpdateTimeToLive , RestoreTableFromBackup , and RestoreTableToPointInTime . +// +// When you are creating a table with one or more secondary indexes, you can have +// up to 250 such requests running at a time. However, if the table or index +// specifications are complex, then DynamoDB might temporarily reduce the number of +// concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations are +// allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// GetRecords was called with a value of more than 1000 for the limit request +// parameter. +// +// More than 2 processes are reading from the same streams shard at the same time. +// Exceeding this limit may result in request throttling. +type LimitExceededException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *LimitExceededException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *LimitExceededException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *LimitExceededException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "LimitExceededException" + } + return *e.ErrorCodeOverride +} +func (e *LimitExceededException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Point in time recovery has not yet been enabled for this source table. 
+type PointInTimeRecoveryUnavailableException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *PointInTimeRecoveryUnavailableException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *PointInTimeRecoveryUnavailableException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *PointInTimeRecoveryUnavailableException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "PointInTimeRecoveryUnavailableException" + } + return *e.ErrorCodeOverride +} +func (e *PointInTimeRecoveryUnavailableException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// The operation tried to access a nonexistent resource-based policy. +// +// If you specified an ExpectedRevisionId , it's possible that a policy is present +// for the resource but its revision ID didn't match the expected value. +type PolicyNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *PolicyNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *PolicyNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *PolicyNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "PolicyNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *PolicyNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was denied due to request throttling. For detailed information +// about why the request was throttled and the ARN of the impacted resource, find +// the [ThrottlingReason]field in the returned exception. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, go +// to [Error Retries and Exponential Backoff]in the Amazon DynamoDB Developer Guide. +// +// [ThrottlingReason]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_ThrottlingReason.html +// [Error Retries and Exponential Backoff]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff +type ProvisionedThroughputExceededException struct { + Message *string + + ErrorCodeOverride *string + + ThrottlingReasons []ThrottlingReason + + noSmithyDocumentSerde +} + +func (e *ProvisionedThroughputExceededException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ProvisionedThroughputExceededException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ProvisionedThroughputExceededException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ProvisionedThroughputExceededException" + } + return *e.ErrorCodeOverride +} +func (e *ProvisionedThroughputExceededException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// The specified replica is already part of the global table. 
+type ReplicaAlreadyExistsException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ReplicaAlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ReplicaAlreadyExistsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ReplicaAlreadyExistsException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ReplicaAlreadyExistsException" + } + return *e.ErrorCodeOverride +} +func (e *ReplicaAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified replica is no longer part of the global table. +type ReplicaNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ReplicaNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ReplicaNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ReplicaNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ReplicaNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *ReplicaNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because one or more items in the request are being +// modified by a request in another Region. +type ReplicatedWriteConflictException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ReplicatedWriteConflictException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ReplicatedWriteConflictException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ReplicatedWriteConflictException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ReplicatedWriteConflictException" + } + return *e.ErrorCodeOverride +} +func (e *ReplicatedWriteConflictException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Throughput exceeds the current throughput quota for your account. For detailed +// information about why the request was throttled and the ARN of the impacted +// resource, find the [ThrottlingReason]field in the returned exception. Contact [Amazon Web Services Support] to request a quota +// increase. +// +// [Amazon Web Services Support]: https://aws.amazon.com/support +// [ThrottlingReason]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_ThrottlingReason.html +type RequestLimitExceeded struct { + Message *string + + ErrorCodeOverride *string + + ThrottlingReasons []ThrottlingReason + + noSmithyDocumentSerde +} + +func (e *RequestLimitExceeded) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *RequestLimitExceeded) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *RequestLimitExceeded) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "RequestLimitExceeded" + } + return *e.ErrorCodeOverride +} +func (e *RequestLimitExceeded) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The operation conflicts with the resource's availability. For example: +// +// - You attempted to recreate an existing table. +// +// - You tried to delete a table currently in the CREATING state. 
+// +// - You tried to update a resource that was already being updated. +// +// When appropriate, wait for the ongoing update to complete and attempt the +// request again. +type ResourceInUseException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ResourceInUseException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ResourceInUseException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ResourceInUseException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ResourceInUseException" + } + return *e.ErrorCodeOverride +} +func (e *ResourceInUseException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The operation tried to access a nonexistent table or index. The resource might +// not be specified correctly, or its status might not be ACTIVE . +type ResourceNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ResourceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ResourceNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ResourceNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ResourceNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *ResourceNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// A target table with the specified name already exists. +type TableAlreadyExistsException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *TableAlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *TableAlreadyExistsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *TableAlreadyExistsException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "TableAlreadyExistsException" + } + return *e.ErrorCodeOverride +} +func (e *TableAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// A target table with the specified name is either being created or deleted. +type TableInUseException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *TableInUseException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *TableInUseException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *TableInUseException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "TableInUseException" + } + return *e.ErrorCodeOverride +} +func (e *TableInUseException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// A source table with the name TableName does not currently exist within the +// subscriber's account or the subscriber is operating in the wrong Amazon Web +// Services Region. 
+type TableNotFoundException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	noSmithyDocumentSerde
+}
+
+func (e *TableNotFoundException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *TableNotFoundException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *TableNotFoundException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "TableNotFoundException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *TableNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request was denied due to request throttling. For detailed information
+// about why the request was throttled and the ARN of the impacted resource, find
+// the [ThrottlingReason]field in the returned exception.
+//
+// [ThrottlingReason]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_ThrottlingReason.html
+type ThrottlingException struct {
+	Message *string
+
+	ErrorCodeOverride *string
+
+	ThrottlingReasons []ThrottlingReason
+
+	noSmithyDocumentSerde
+}
+
+func (e *ThrottlingException) Error() string {
+	return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ThrottlingException) ErrorMessage() string {
+	if e.Message == nil {
+		return ""
+	}
+	return *e.Message
+}
+func (e *ThrottlingException) ErrorCode() string {
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "ThrottlingException"
+	}
+	return *e.ErrorCodeOverride
+}
+func (e *ThrottlingException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The entire transaction request was canceled.
+//
+// DynamoDB cancels a TransactWriteItems request under the following circumstances:
+//
+// - A condition in one of the condition expressions is not met.
+//
+// - A table in the TransactWriteItems request is in a different account or
+// region.
+//
+// - More than one action in the TransactWriteItems operation targets the same
+// item.
+//
+// - There is insufficient provisioned capacity for the transaction to be
+// completed.
+//
+// - An item size becomes too large (larger than 400 KB), or a local secondary
+// index (LSI) becomes too large, or a similar validation error occurs because of
+// changes made by the transaction.
+//
+// - There is a user error, such as an invalid data format.
+//
+// - There is an ongoing TransactWriteItems operation that conflicts with a
+// concurrent TransactWriteItems request. In this case the TransactWriteItems
+// operation fails with a TransactionCanceledException .
+//
+// DynamoDB cancels a TransactGetItems request under the following circumstances:
+//
+// - There is an ongoing TransactGetItems operation that conflicts with a
+// concurrent PutItem , UpdateItem , DeleteItem or TransactWriteItems request. In
+// this case the TransactGetItems operation fails with a
+// TransactionCanceledException .
+//
+// - A table in the TransactGetItems request is in a different account or region.
+//
+// - There is insufficient provisioned capacity for the transaction to be
+// completed.
+//
+// - There is a user error, such as an invalid data format.
+//
+// If using Java, DynamoDB lists the cancellation reasons on the
+// CancellationReasons property. This property is not set for other languages.
+// Transaction cancellation reasons are ordered in the order of requested items;
+// if an item has no error, it will have a None code and a null message.
+//
+// Cancellation reason codes and possible error messages:
+//
+// - No Errors:
+//
+// - Code: None
+//
+// - Message: null
+//
+// - Conditional Check Failed:
+//
+// - Code: ConditionalCheckFailed
+//
+// - Message: The conditional request failed.
+//
+// - Item Collection Size Limit Exceeded:
+//
+// - Code: ItemCollectionSizeLimitExceeded
+//
+// - Message: Collection size exceeded.
+//
+// - Transaction Conflict:
+//
+// - Code: TransactionConflict
+//
+// - Message: Transaction is ongoing for the item.
+//
+// - Provisioned Throughput Exceeded:
+//
+// - Code: ProvisionedThroughputExceeded
+//
+// - Messages:
+//
+// - The level of configured provisioned throughput for the table was exceeded.
+// Consider increasing your provisioning level with the UpdateTable API.
+//
+// This message is returned when provisioned throughput is exceeded on a
+// provisioned DynamoDB table.
+//
+// - The level of configured provisioned throughput for one or more global
+// secondary indexes of the table was exceeded. Consider increasing your
+// provisioning level for the under-provisioned global secondary indexes with the
+// UpdateTable API.
+//
+// This message is returned when provisioned throughput is exceeded on a
+// provisioned GSI.
+//
+// - Throttling Error:
+//
+// - Code: ThrottlingError
+//
+// - Messages:
+//
+// - Throughput exceeds the current capacity of your table or index. DynamoDB is
+// automatically scaling your table or index so please try again shortly. If
+// exceptions persist, check if you have a hot key:
+// https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
+//
+// This message is returned when writes get throttled on an On-Demand table as
+// DynamoDB is automatically scaling the table.
+//
+// - Throughput exceeds the current capacity for one or more global secondary
+// indexes. DynamoDB is automatically scaling your index so please try again
+// shortly.
+//
+// This message is returned when writes get throttled on an On-Demand GSI as
+// DynamoDB is automatically scaling the GSI.
+//
+// - Validation Error:
+//
+// - Code: ValidationError
+//
+// - Messages:
+//
+// - One or more parameter values were invalid.
+//
+// - The update expression attempted to update the secondary index key beyond
+// allowed size limits.
+//
+// - The update expression attempted to update the secondary index key to an
+// unsupported type.
+//
+// - An operand in the update expression has an incorrect data type.
+//
+// - Item size to update has exceeded the maximum allowed size.
+//
+// - Number overflow. Attempting to store a number with a magnitude larger than
+// the supported range.
+//
+// - Type mismatch for attribute to update.
+//
+// - Nesting levels have exceeded supported limits.
+//
+// - The document path provided in the update expression is invalid for update.
+//
+// - The provided expression refers to an attribute that does not exist in the
+// item.
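+//
+// A minimal inspection sketch (hypothetical logging, not part of the generated
+// API) that surfaces the per-item reasons described above:
+//
+//	var tce *TransactionCanceledException
+//	if errors.As(err, &tce) {
+//		for i, r := range tce.CancellationReasons {
+//			if r.Code != nil && *r.Code != "None" {
+//				log.Printf("item %d failed: %s", i, *r.Code)
+//			}
+//		}
+//	}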
+type TransactionCanceledException struct { + Message *string + + ErrorCodeOverride *string + + CancellationReasons []CancellationReason + + noSmithyDocumentSerde +} + +func (e *TransactionCanceledException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *TransactionCanceledException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *TransactionCanceledException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "TransactionCanceledException" + } + return *e.ErrorCodeOverride +} +func (e *TransactionCanceledException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Operation was rejected because there is an ongoing transaction for the item. +type TransactionConflictException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *TransactionConflictException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *TransactionConflictException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *TransactionConflictException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "TransactionConflictException" + } + return *e.ErrorCodeOverride +} +func (e *TransactionConflictException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The transaction with the given request token is already in progress. +// +// # Recommended Settings +// +// This is a general recommendation for handling the TransactionInProgressException +// . These settings help ensure that the client retries will trigger completion of +// the ongoing TransactWriteItems request. +// +// - Set clientExecutionTimeout to a value that allows at least one retry to be +// processed after 5 seconds have elapsed since the first attempt for the +// TransactWriteItems operation. +// +// - Set socketTimeout to a value a little lower than the requestTimeout setting. +// +// - requestTimeout should be set based on the time taken for the individual +// retries of a single HTTP request for your use case, but setting it to 1 second +// or higher should work well to reduce chances of retries and +// TransactionInProgressException errors. +// +// - Use exponential backoff when retrying and tune backoff if needed. 
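+//
+// A minimal backoff sketch (hypothetical client and input values, not part of
+// the generated API) of the retry shape these settings describe:
+//
+//	delay := 500 * time.Millisecond // default base delay for 4xx errors
+//	for attempt := 0; attempt < 4; attempt++ {
+//		if _, err := client.TransactWriteItems(ctx, input); err == nil {
+//			break
+//		}
+//		time.Sleep(delay)
+//		delay *= 2 // exponential backoff
+//	}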
+// +// Assuming [default retry policy], example timeout settings based on the guidelines above are as +// follows: +// +// Example timeline: +// +// - 0-1000 first attempt +// +// - 1000-1500 first sleep/delay (default retry policy uses 500 ms as base delay +// for 4xx errors) +// +// - 1500-2500 second attempt +// +// - 2500-3500 second sleep/delay (500 * 2, exponential backoff) +// +// - 3500-4500 third attempt +// +// - 4500-6500 third sleep/delay (500 * 2^2) +// +// - 6500-7500 fourth attempt (this can trigger inline recovery since 5 seconds +// have elapsed since the first attempt reached TC) +// +// [default retry policy]: https://github.com/aws/aws-sdk-java/blob/fd409dee8ae23fb8953e0bb4dbde65536a7e0514/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L97 +type TransactionInProgressException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *TransactionInProgressException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *TransactionInProgressException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *TransactionInProgressException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "TransactionInProgressException" + } + return *e.ErrorCodeOverride +} +func (e *TransactionInProgressException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/types.go new file mode 100644 index 0000000000..ce3801ef9c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/types.go @@ -0,0 +1,3838 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" + "time" +) + +// Contains details of a table archival operation. +type ArchivalSummary struct { + + // The Amazon Resource Name (ARN) of the backup the table was archived to, when + // applicable in the archival reason. If you wish to restore this backup to the + // same table name, you will need to delete the original table. + ArchivalBackupArn *string + + // The date and time when table archival was initiated by DynamoDB, in UNIX epoch + // time format. + ArchivalDateTime *time.Time + + // The reason DynamoDB archived the table. Currently, the only possible value is: + // + // - INACCESSIBLE_ENCRYPTION_CREDENTIALS - The table was archived due to the + // table's KMS key being inaccessible for more than seven days. An On-Demand backup + // was created at the archival time. + ArchivalReason *string + + noSmithyDocumentSerde +} + +// Represents an attribute for describing the schema for the table and indexes. +type AttributeDefinition struct { + + // A name for the attribute. + // + // This member is required. + AttributeName *string + + // The data type for the attribute, where: + // + // - S - the attribute is of type String + // + // - N - the attribute is of type Number + // + // - B - the attribute is of type Binary + // + // This member is required. + AttributeType ScalarAttributeType + + noSmithyDocumentSerde +} + +// Represents the data for an attribute. +// +// Each attribute value is described as a name-value pair. The name is the data +// type, and the value is the data itself. +// +// For more information, see [Data Types] in the Amazon DynamoDB Developer Guide. 
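+//
+// A minimal construction sketch (hypothetical item, not part of the generated
+// API) of how the member types listed below compose into one item:
+//
+//	item := map[string]AttributeValue{
+//		"PK":     &AttributeValueMemberS{Value: "user#123"},
+//		"Age":    &AttributeValueMemberN{Value: "35"},
+//		"Active": &AttributeValueMemberBOOL{Value: true},
+//	}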
+// +// The following types satisfy this interface: +// +// AttributeValueMemberB +// AttributeValueMemberBOOL +// AttributeValueMemberBS +// AttributeValueMemberL +// AttributeValueMemberM +// AttributeValueMemberN +// AttributeValueMemberNS +// AttributeValueMemberNULL +// AttributeValueMemberS +// AttributeValueMemberSS +// +// [Data Types]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes +type AttributeValue interface { + isAttributeValue() +} + +// An attribute of type Binary. For example: +// +// "B": "dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk" +type AttributeValueMemberB struct { + Value []byte + + noSmithyDocumentSerde +} + +func (*AttributeValueMemberB) isAttributeValue() {} + +// An attribute of type Boolean. For example: +// +// "BOOL": true +type AttributeValueMemberBOOL struct { + Value bool + + noSmithyDocumentSerde +} + +func (*AttributeValueMemberBOOL) isAttributeValue() {} + +// An attribute of type Binary Set. For example: +// +// "BS": ["U3Vubnk=", "UmFpbnk=", "U25vd3k="] +type AttributeValueMemberBS struct { + Value [][]byte + + noSmithyDocumentSerde +} + +func (*AttributeValueMemberBS) isAttributeValue() {} + +// An attribute of type List. For example: +// +// "L": [ {"S": "Cookies"} , {"S": "Coffee"}, {"N": "3.14159"}] +type AttributeValueMemberL struct { + Value []AttributeValue + + noSmithyDocumentSerde +} + +func (*AttributeValueMemberL) isAttributeValue() {} + +// An attribute of type Map. For example: +// +// "M": {"Name": {"S": "Joe"}, "Age": {"N": "35"}} +type AttributeValueMemberM struct { + Value map[string]AttributeValue + + noSmithyDocumentSerde +} + +func (*AttributeValueMemberM) isAttributeValue() {} + +// An attribute of type Number. For example: +// +// "N": "123.45" +// +// Numbers are sent across the network to DynamoDB as strings, to maximize +// compatibility across languages and libraries. However, DynamoDB treats them as +// number type attributes for mathematical operations. +type AttributeValueMemberN struct { + Value string + + noSmithyDocumentSerde +} + +func (*AttributeValueMemberN) isAttributeValue() {} + +// An attribute of type Number Set. For example: +// +// "NS": ["42.2", "-19", "7.5", "3.14"] +// +// Numbers are sent across the network to DynamoDB as strings, to maximize +// compatibility across languages and libraries. However, DynamoDB treats them as +// number type attributes for mathematical operations. +type AttributeValueMemberNS struct { + Value []string + + noSmithyDocumentSerde +} + +func (*AttributeValueMemberNS) isAttributeValue() {} + +// An attribute of type Null. For example: +// +// "NULL": true +type AttributeValueMemberNULL struct { + Value bool + + noSmithyDocumentSerde +} + +func (*AttributeValueMemberNULL) isAttributeValue() {} + +// An attribute of type String. For example: +// +// "S": "Hello" +type AttributeValueMemberS struct { + Value string + + noSmithyDocumentSerde +} + +func (*AttributeValueMemberS) isAttributeValue() {} + +// An attribute of type String Set. For example: +// +// "SS": ["Giraffe", "Hippo" ,"Zebra"] +type AttributeValueMemberSS struct { + Value []string + + noSmithyDocumentSerde +} + +func (*AttributeValueMemberSS) isAttributeValue() {} + +// For the UpdateItem operation, represents the attributes to be modified, the +// action to perform on each, and the new value for each. +// +// You cannot use UpdateItem to update any primary key attributes. 
Instead, you +// will need to delete the item, and then use PutItem to create a new item with +// new attributes. +// +// Attribute values cannot be null; string and binary type attributes must have +// lengths greater than zero; and set type attributes must not be empty. Requests +// with empty values will be rejected with a ValidationException exception. +type AttributeValueUpdate struct { + + // Specifies how to perform the update. Valid values are PUT (default), DELETE , + // and ADD . The behavior depends on whether the specified primary key already + // exists in the table. + // + // If an item with the specified Key is found in the table: + // + // - PUT - Adds the specified attribute to the item. If the attribute already + // exists, it is replaced by the new value. + // + // - DELETE - If no value is specified, the attribute and its value are removed + // from the item. The data type of the specified value must match the existing + // value's data type. + // + // If a set of values is specified, then those values are subtracted from the old + // set. For example, if the attribute value was the set [a,b,c] and the DELETE + // action specified [a,c] , then the final attribute value would be [b] . + // Specifying an empty set is an error. + // + // - ADD - If the attribute does not already exist, then the attribute and its + // values are added to the item. If the attribute does exist, then the behavior of + // ADD depends on the data type of the attribute: + // + // - If the existing attribute is a number, and if Value is also a number, then + // the Value is mathematically added to the existing attribute. If Value is a + // negative number, then it is subtracted from the existing attribute. + // + // If you use ADD to increment or decrement a number value for an item that doesn't + // exist before the update, DynamoDB uses 0 as the initial value. + // + // In addition, if you use ADD to update an existing item, and intend to increment + // or decrement an attribute value which does not yet exist, DynamoDB uses 0 as + // the initial value. For example, suppose that the item you want to update does + // not yet have an attribute named itemcount, but you decide to ADD the number 3 + // to this attribute anyway, even though it currently does not exist. DynamoDB will + // create the itemcount attribute, set its initial value to 0 , and finally add 3 + // to it. The result will be a new itemcount attribute in the item, with a value of + // 3 . + // + // - If the existing data type is a set, and if the Value is also a set, then the + // Value is added to the existing set. (This is a set operation, not mathematical + // addition.) For example, if the attribute value was the set [1,2] , and the ADD + // action specified [3] , then the final attribute value would be [1,2,3] . An + // error occurs if an Add action is specified for a set attribute and the attribute + // type specified does not match the existing set type. + // + // Both sets must have the same primitive data type. For example, if the existing + // data type is a set of strings, the Value must also be a set of strings. The + // same holds true for number sets and binary sets. + // + // This action is only valid for an existing attribute whose data type is number + // or is a set. Do not use ADD for any other data types. + // + // If no item with the specified Key is found: + // + // - PUT - DynamoDB creates a new item with the specified primary key, and then + // adds the attribute. 
+ // + // - DELETE - Nothing happens; there is no attribute to delete. + // + // - ADD - DynamoDB creates a new item with the supplied primary key and number + // (or set) for the attribute value. The only data types allowed are number, number + // set, string set or binary set. + Action AttributeAction + + // Represents the data for an attribute. + // + // Each attribute value is described as a name-value pair. The name is the data + // type, and the value is the data itself. + // + // For more information, see [Data Types] in the Amazon DynamoDB Developer Guide. + // + // [Data Types]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes + Value AttributeValue + + noSmithyDocumentSerde +} + +// Represents the properties of the scaling policy. +type AutoScalingPolicyDescription struct { + + // The name of the scaling policy. + PolicyName *string + + // Represents a target tracking scaling policy configuration. + TargetTrackingScalingPolicyConfiguration *AutoScalingTargetTrackingScalingPolicyConfigurationDescription + + noSmithyDocumentSerde +} + +// Represents the auto scaling policy to be modified. +type AutoScalingPolicyUpdate struct { + + // Represents a target tracking scaling policy configuration. + // + // This member is required. + TargetTrackingScalingPolicyConfiguration *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate + + // The name of the scaling policy. + PolicyName *string + + noSmithyDocumentSerde +} + +// Represents the auto scaling settings for a global table or global secondary +// index. +type AutoScalingSettingsDescription struct { + + // Disabled auto scaling for this global table or global secondary index. + AutoScalingDisabled *bool + + // Role ARN used for configuring the auto scaling policy. + AutoScalingRoleArn *string + + // The maximum capacity units that a global table or global secondary index should + // be scaled up to. + MaximumUnits *int64 + + // The minimum capacity units that a global table or global secondary index should + // be scaled down to. + MinimumUnits *int64 + + // Information about the scaling policies. + ScalingPolicies []AutoScalingPolicyDescription + + noSmithyDocumentSerde +} + +// Represents the auto scaling settings to be modified for a global table or +// global secondary index. +type AutoScalingSettingsUpdate struct { + + // Disabled auto scaling for this global table or global secondary index. + AutoScalingDisabled *bool + + // Role ARN used for configuring auto scaling policy. + AutoScalingRoleArn *string + + // The maximum capacity units that a global table or global secondary index should + // be scaled up to. + MaximumUnits *int64 + + // The minimum capacity units that a global table or global secondary index should + // be scaled down to. + MinimumUnits *int64 + + // The scaling policy to apply for scaling target global table or global secondary + // index capacity units. + ScalingPolicyUpdate *AutoScalingPolicyUpdate + + noSmithyDocumentSerde +} + +// Represents the properties of a target tracking scaling policy. +type AutoScalingTargetTrackingScalingPolicyConfigurationDescription struct { + + // The target value for the metric. The range is 8.515920e-109 to 1.174271e+108 + // (Base 10) or 2e-360 to 2e360 (Base 2). + // + // This member is required. + TargetValue *float64 + + // Indicates whether scale in by the target tracking policy is disabled. 
If the + // value is true, scale in is disabled and the target tracking policy won't remove + // capacity from the scalable resource. Otherwise, scale in is enabled and the + // target tracking policy can remove capacity from the scalable resource. The + // default value is false. + DisableScaleIn *bool + + // The amount of time, in seconds, after a scale in activity completes before + // another scale in activity can start. The cooldown period is used to block + // subsequent scale in requests until it has expired. You should scale in + // conservatively to protect your application's availability. However, if another + // alarm triggers a scale out policy during the cooldown period after a scale-in, + // application auto scaling scales out your scalable target immediately. + ScaleInCooldown *int32 + + // The amount of time, in seconds, after a scale out activity completes before + // another scale out activity can start. While the cooldown period is in effect, + // the capacity that has been added by the previous scale out event that initiated + // the cooldown is calculated as part of the desired capacity for the next scale + // out. You should continuously (but not excessively) scale out. + ScaleOutCooldown *int32 + + noSmithyDocumentSerde +} + +// Represents the settings of a target tracking scaling policy that will be +// modified. +type AutoScalingTargetTrackingScalingPolicyConfigurationUpdate struct { + + // The target value for the metric. The range is 8.515920e-109 to 1.174271e+108 + // (Base 10) or 2e-360 to 2e360 (Base 2). + // + // This member is required. + TargetValue *float64 + + // Indicates whether scale in by the target tracking policy is disabled. If the + // value is true, scale in is disabled and the target tracking policy won't remove + // capacity from the scalable resource. Otherwise, scale in is enabled and the + // target tracking policy can remove capacity from the scalable resource. The + // default value is false. + DisableScaleIn *bool + + // The amount of time, in seconds, after a scale in activity completes before + // another scale in activity can start. The cooldown period is used to block + // subsequent scale in requests until it has expired. You should scale in + // conservatively to protect your application's availability. However, if another + // alarm triggers a scale out policy during the cooldown period after a scale-in, + // application auto scaling scales out your scalable target immediately. + ScaleInCooldown *int32 + + // The amount of time, in seconds, after a scale out activity completes before + // another scale out activity can start. While the cooldown period is in effect, + // the capacity that has been added by the previous scale out event that initiated + // the cooldown is calculated as part of the desired capacity for the next scale + // out. You should continuously (but not excessively) scale out. + ScaleOutCooldown *int32 + + noSmithyDocumentSerde +} + +// Contains the description of the backup created for the table. +type BackupDescription struct { + + // Contains the details of the backup created for the table. + BackupDetails *BackupDetails + + // Contains the details of the table when the backup was created. + SourceTableDetails *SourceTableDetails + + // Contains the details of the features enabled on the table when the backup was + // created. For example, LSIs, GSIs, streams, TTL. + SourceTableFeatureDetails *SourceTableFeatureDetails + + noSmithyDocumentSerde +} + +// Contains the details of the backup created for the table. 
+type BackupDetails struct { + + // ARN associated with the backup. + // + // This member is required. + BackupArn *string + + // Time at which the backup was created. This is the request time of the backup. + // + // This member is required. + BackupCreationDateTime *time.Time + + // Name of the requested backup. + // + // This member is required. + BackupName *string + + // Backup can be in one of the following states: CREATING, ACTIVE, DELETED. + // + // This member is required. + BackupStatus BackupStatus + + // BackupType: + // + // - USER - You create and manage these using the on-demand backup feature. + // + // - SYSTEM - If you delete a table with point-in-time recovery enabled, a SYSTEM + // backup is automatically created and is retained for 35 days (at no additional + // cost). System backups allow you to restore the deleted table to the state it was + // in just before the point of deletion. + // + // - AWS_BACKUP - On-demand backup created by you from Backup service. + // + // This member is required. + BackupType BackupType + + // Time at which the automatic on-demand backup created by DynamoDB will expire. + // This SYSTEM on-demand backup expires automatically 35 days after its creation. + BackupExpiryDateTime *time.Time + + // Size of the backup in bytes. DynamoDB updates this value approximately every + // six hours. Recent changes might not be reflected in this value. + BackupSizeBytes *int64 + + noSmithyDocumentSerde +} + +// Contains details for the backup. +type BackupSummary struct { + + // ARN associated with the backup. + BackupArn *string + + // Time at which the backup was created. + BackupCreationDateTime *time.Time + + // Time at which the automatic on-demand backup created by DynamoDB will expire. + // This SYSTEM on-demand backup expires automatically 35 days after its creation. + BackupExpiryDateTime *time.Time + + // Name of the specified backup. + BackupName *string + + // Size of the backup in bytes. + BackupSizeBytes *int64 + + // Backup can be in one of the following states: CREATING, ACTIVE, DELETED. + BackupStatus BackupStatus + + // BackupType: + // + // - USER - You create and manage these using the on-demand backup feature. + // + // - SYSTEM - If you delete a table with point-in-time recovery enabled, a SYSTEM + // backup is automatically created and is retained for 35 days (at no additional + // cost). System backups allow you to restore the deleted table to the state it was + // in just before the point of deletion. + // + // - AWS_BACKUP - On-demand backup created by you from Backup service. + BackupType BackupType + + // ARN associated with the table. + TableArn *string + + // Unique identifier for the table. + TableId *string + + // Name of the table. + TableName *string + + noSmithyDocumentSerde +} + +// An error associated with a statement in a PartiQL batch that was run. +type BatchStatementError struct { + + // The error code associated with the failed PartiQL batch statement. + Code BatchStatementErrorCodeEnum + + // The item which caused the condition check to fail. This will be set if + // ReturnValuesOnConditionCheckFailure is specified as ALL_OLD . + Item map[string]AttributeValue + + // The error message associated with the PartiQL batch response. + Message *string + + noSmithyDocumentSerde +} + +// A PartiQL batch statement request. +type BatchStatementRequest struct { + + // A valid PartiQL statement. + // + // This member is required. + Statement *string + + // The read consistency of the PartiQL batch request. 
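+ //
+ // As an illustrative note (an assumption about defaults, not generated
+ // documentation): setting this to aws.Bool(true) requests strongly consistent
+ // reads, while leaving it unset yields eventually consistent reads.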
+ ConsistentRead *bool
+
+ // The parameters associated with a PartiQL statement in the batch request.
+ Parameters []AttributeValue
+
+ // An optional parameter that returns the item attributes for a PartiQL batch
+ // request operation that failed a condition check.
+ //
+ // There is no additional cost associated with requesting a return value aside
+ // from the small network and processing overhead of receiving a larger response.
+ // No read capacity units are consumed.
+ ReturnValuesOnConditionCheckFailure ReturnValuesOnConditionCheckFailure
+
+ noSmithyDocumentSerde
+}
+
+// A PartiQL batch statement response.
+type BatchStatementResponse struct {
+
+ // The error associated with a failed PartiQL batch statement.
+ Error *BatchStatementError
+
+ // A DynamoDB item associated with a BatchStatementResponse.
+ Item map[string]AttributeValue
+
+ // The table name associated with a failed PartiQL batch statement.
+ TableName *string
+
+ noSmithyDocumentSerde
+}
+
+// Contains the details for the read/write capacity mode. This page talks about
+// PROVISIONED and PAY_PER_REQUEST billing modes. For more information about these
+// modes, see [Read/write capacity mode].
+//
+// You may need to switch to on-demand mode at least once in order to return a
+// BillingModeSummary response.
+//
+// [Read/write capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html
+type BillingModeSummary struct {
+
+ // Controls how you are charged for read and write throughput and how you manage
+ // capacity. This setting can be changed later.
+ //
+ // - PROVISIONED - Sets the read/write capacity mode to PROVISIONED . We
+ // recommend using PROVISIONED for predictable workloads.
+ //
+ // - PAY_PER_REQUEST - Sets the read/write capacity mode to PAY_PER_REQUEST . We
+ // recommend using PAY_PER_REQUEST for unpredictable workloads.
+ BillingMode BillingMode
+
+ // Represents the time when PAY_PER_REQUEST was last set as the read/write
+ // capacity mode.
+ LastUpdateToPayPerRequestDateTime *time.Time
+
+ noSmithyDocumentSerde
+}
+
+// An ordered list of errors for each item in the request which caused the
+// transaction to get cancelled. The values of the list are ordered according to
+// the ordering of the TransactWriteItems request parameter. If no error occurred
+// for the associated item, an error with a Null code and Null message will be
+// present.
+type CancellationReason struct {
+
+ // Status code for the result of the cancelled transaction.
+ Code *string
+
+ // Item in the request which caused the transaction to get cancelled.
+ Item map[string]AttributeValue
+
+ // Cancellation reason message description.
+ Message *string
+
+ noSmithyDocumentSerde
+}
+
+// Represents the amount of provisioned throughput capacity consumed on a table or
+// an index.
+type Capacity struct {
+
+ // The total number of capacity units consumed on a table or an index.
+ CapacityUnits *float64
+
+ // The total number of read capacity units consumed on a table or an index.
+ ReadCapacityUnits *float64
+
+ // The total number of write capacity units consumed on a table or an index.
+ WriteCapacityUnits *float64
+
+ noSmithyDocumentSerde
+}
+
+// Represents the selection criteria for a Query or Scan operation:
+//
+// - For a Query operation, Condition is used for specifying the KeyConditions to
+// use when querying a table or an index. 
For KeyConditions , only the following +// comparison operators are supported: +// +// EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN +// +// Condition is also used in a QueryFilter , which evaluates the query results and +// +// returns only the desired values. +// +// - For a Scan operation, Condition is used in a ScanFilter , which evaluates +// the scan results and returns only the desired values. +type Condition struct { + + // A comparator for evaluating attributes. For example, equals, greater than, less + // than, etc. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | + // BEGINS_WITH | IN | BETWEEN + // + // The following are descriptions of each comparison operator. + // + // - EQ : Equal. EQ is supported for all data types, including lists and maps. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, Binary, String Set, Number Set, or Binary Set. If an item contains an + // AttributeValue element of a different type than the one provided in the + // request, the value does not match. For example, {"S":"6"} does not equal + // {"N":"6"} . Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]} . + // + // - NE : Not equal. NE is supported for all data types, including lists and maps. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // Binary, String Set, Number Set, or Binary Set. If an item contains an + // AttributeValue of a different type than the one provided in the request, the + // value does not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, + // {"N":"6"} does not equal {"NS":["6", "2", "1"]} . + // + // - LE : Less than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]} . + // + // - LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // or Binary (not a set type). If an item contains an AttributeValue element of a + // different type than the one provided in the request, the value does not match. + // For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"} does not + // compare to {"NS":["6", "2", "1"]} . + // + // - GE : Greater than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]} . + // + // - GT : Greater than. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]} . + // + // - NOT_NULL : The attribute exists. NOT_NULL is supported for all data types, + // including lists and maps. 
+ // + // This operator tests for the existence of an attribute, not its data type. If + // the data type of attribute " a " is null, and you evaluate it using NOT_NULL , + // the result is a Boolean true . This result is because the attribute " a " + // exists; its data type is not relevant to the NOT_NULL comparison operator. + // + // - NULL : The attribute does not exist. NULL is supported for all data types, + // including lists and maps. + // + // This operator tests for the nonexistence of an attribute, not its data type. If + // the data type of attribute " a " is null, and you evaluate it using NULL , the + // result is a Boolean false . This is because the attribute " a " exists; its + // data type is not relevant to the NULL comparison operator. + // + // - CONTAINS : Checks for a subsequence, or value in a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison is + // of type String, then the operator checks for a substring match. If the target + // attribute of the comparison is of type Binary, then the operator looks for a + // subsequence of the target that matches the input. If the target attribute of the + // comparison is a set (" SS ", " NS ", or " BS "), then the operator evaluates + // to true if it finds an exact match with any member of the set. + // + // CONTAINS is supported for lists: When evaluating " a CONTAINS b ", " a " can be + // a list; however, " b " cannot be a set, a map, or a list. + // + // - NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in + // a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison is + // a String, then the operator checks for the absence of a substring match. If the + // target attribute of the comparison is Binary, then the operator checks for the + // absence of a subsequence of the target that matches the input. If the target + // attribute of the comparison is a set (" SS ", " NS ", or " BS "), then the + // operator evaluates to true if it does not find an exact match with any member of + // the set. + // + // NOT_CONTAINS is supported for lists: When evaluating " a NOT CONTAINS b ", " a " + // can be a list; however, " b " cannot be a set, a map, or a list. + // + // - BEGINS_WITH : Checks for a prefix. + // + // AttributeValueList can contain only one AttributeValue of type String or Binary + // (not a Number or a set type). The target attribute of the comparison must be of + // type String or Binary (not a Number or a set type). + // + // - IN : Checks for matching elements in a list. + // + // AttributeValueList can contain one or more AttributeValue elements of type + // String, Number, or Binary. These attributes are compared against an existing + // attribute of an item. If any elements of the input are equal to the item + // attribute, the expression evaluates to true. + // + // - BETWEEN : Greater than or equal to the first value, and less than or equal + // to the second value. + // + // AttributeValueList must contain two AttributeValue elements of the same type, + // either String, Number, or Binary (not a set type). A target attribute matches if + // the target value is greater than, or equal to, the first element and less than, + // or equal to, the second element. 
If an item contains an AttributeValue element + // of a different type than the one provided in the request, the value does not + // match. For example, {"S":"6"} does not compare to {"N":"6"} . Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]} + // + // For usage examples of AttributeValueList and ComparisonOperator , see [Legacy Conditional Parameters] in the + // Amazon DynamoDB Developer Guide. + // + // [Legacy Conditional Parameters]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html + // + // This member is required. + ComparisonOperator ComparisonOperator + + // One or more values to evaluate against the supplied attribute. The number of + // values in the list depends on the ComparisonOperator being used. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based on + // ASCII character code values. For example, a is greater than A , and a is + // greater than B . For a list of code values, see [http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters]. + // + // For Binary, DynamoDB treats each byte of the binary data as unsigned when it + // compares binary values. + // + // [http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters]: http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + AttributeValueList []AttributeValue + + noSmithyDocumentSerde +} + +// Represents a request to perform a check that an item exists or to check the +// condition of specific attributes of the item. +type ConditionCheck struct { + + // A condition that must be satisfied in order for a conditional update to + // succeed. For more information, see [Condition expressions]in the Amazon DynamoDB Developer Guide. + // + // [Condition expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ConditionExpressions.html + // + // This member is required. + ConditionExpression *string + + // The primary key of the item to be checked. Each element consists of an + // attribute name and a value for that attribute. + // + // This member is required. + Key map[string]AttributeValue + + // Name of the table for the check item request. You can also provide the Amazon + // Resource Name (ARN) of the table in this parameter. + // + // This member is required. + TableName *string + + // One or more substitution tokens for attribute names in an expression. For more + // information, see [Expression attribute names]in the Amazon DynamoDB Developer Guide. + // + // [Expression attribute names]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ExpressionAttributeNames.html + ExpressionAttributeNames map[string]string + + // One or more values that can be substituted in an expression. For more + // information, see [Condition expressions]in the Amazon DynamoDB Developer Guide. + // + // [Condition expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ConditionExpressions.html + ExpressionAttributeValues map[string]AttributeValue + + // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the + // ConditionCheck condition fails. For ReturnValuesOnConditionCheckFailure , the + // valid values are: NONE and ALL_OLD. + ReturnValuesOnConditionCheckFailure ReturnValuesOnConditionCheckFailure + + noSmithyDocumentSerde +} + +// The capacity units consumed by an operation. 
The data returned includes the
+// total provisioned throughput consumed, along with statistics for the table and
+// any indexes involved in the operation. ConsumedCapacity is only returned if the
+// request asked for it. For more information, see [Provisioned capacity mode]in the Amazon DynamoDB
+// Developer Guide.
+//
+// [Provisioned capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html
+type ConsumedCapacity struct {
+
+ // The total number of capacity units consumed by the operation.
+ CapacityUnits *float64
+
+ // The amount of throughput consumed on each global index affected by the
+ // operation.
+ GlobalSecondaryIndexes map[string]Capacity
+
+ // The amount of throughput consumed on each local index affected by the operation.
+ LocalSecondaryIndexes map[string]Capacity
+
+ // The total number of read capacity units consumed by the operation.
+ ReadCapacityUnits *float64
+
+ // The amount of throughput consumed on the table affected by the operation.
+ Table *Capacity
+
+ // The name of the table that was affected by the operation. If you had specified
+ // the Amazon Resource Name (ARN) of a table in the input, you'll see the table ARN
+ // in the response.
+ TableName *string
+
+ // The total number of write capacity units consumed by the operation.
+ WriteCapacityUnits *float64
+
+ noSmithyDocumentSerde
+}
+
+// Represents the continuous backups and point in time recovery settings on the
+// table.
+type ContinuousBackupsDescription struct {
+
+ // ContinuousBackupsStatus can be one of the following states: ENABLED, DISABLED
+ //
+ // This member is required.
+ ContinuousBackupsStatus ContinuousBackupsStatus
+
+ // The description of the point in time recovery settings applied to the table.
+ PointInTimeRecoveryDescription *PointInTimeRecoveryDescription
+
+ noSmithyDocumentSerde
+}
+
+// Represents a Contributor Insights summary entry.
+type ContributorInsightsSummary struct {
+
+ // Indicates the current mode of CloudWatch Contributor Insights, specifying
+ // whether it tracks all access and throttled events or throttled events only for
+ // the DynamoDB table or index.
+ ContributorInsightsMode ContributorInsightsMode
+
+ // Describes the current status for contributor insights for the given table and
+ // index, if applicable.
+ ContributorInsightsStatus ContributorInsightsStatus
+
+ // Name of the index associated with the summary, if any.
+ IndexName *string
+
+ // Name of the table associated with the summary.
+ TableName *string
+
+ noSmithyDocumentSerde
+}
+
+// Represents a new global secondary index to be added to an existing table.
+type CreateGlobalSecondaryIndexAction struct {
+
+ // The name of the global secondary index to be created.
+ //
+ // This member is required.
+ IndexName *string
+
+ // The key schema for the global secondary index.
+ //
+ // This member is required.
+ KeySchema []KeySchemaElement
+
+ // Represents attributes that are copied (projected) from the table into an index.
+ // These are in addition to the primary key attributes and index key attributes,
+ // which are automatically projected.
+ //
+ // This member is required.
+ Projection *Projection
+
+ // The maximum number of read and write units for the global secondary index being
+ // created. If you use this parameter, you must specify MaxReadRequestUnits ,
+ // MaxWriteRequestUnits , or both. You must use either OnDemandThroughput or
+ // ProvisionedThroughput based on your table's capacity mode. 
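+ //
+ // A minimal sketch (the limits are assumptions, and aws.Int64 is the pointer
+ // helper from github.com/aws/aws-sdk-go-v2/aws):
+ //
+ //	OnDemandThroughput{MaxReadRequestUnits: aws.Int64(100), MaxWriteRequestUnits: aws.Int64(50)}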
+ OnDemandThroughput *OnDemandThroughput
+
+ // Represents the provisioned throughput settings for the specified global
+ // secondary index.
+ //
+ // For current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas] in the
+ // Amazon DynamoDB Developer Guide.
+ //
+ // [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html
+ ProvisionedThroughput *ProvisionedThroughput
+
+ // Represents the warm throughput value (in read units per second and write units
+ // per second) when creating a secondary index.
+ WarmThroughput *WarmThroughput
+
+ noSmithyDocumentSerde
+}
+
+// Specifies the action to add a new witness Region to a MRSC global table. A MRSC
+// global table can be configured with either three replicas, or with two replicas
+// and one witness.
+type CreateGlobalTableWitnessGroupMemberAction struct {
+
+ // The Amazon Web Services Region name to be added as a witness Region for the
+ // MRSC global table. The witness must be in a different Region than the replicas
+ // and within the same Region set:
+ //
+ // - US Region set: US East (N. Virginia), US East (Ohio), US West (Oregon)
+ //
+ // - EU Region set: Europe (Ireland), Europe (London), Europe (Paris), Europe
+ // (Frankfurt)
+ //
+ // - AP Region set: Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific
+ // (Osaka)
+ //
+ // This member is required.
+ RegionName *string
+
+ noSmithyDocumentSerde
+}
+
+// Represents a replica to be added.
+type CreateReplicaAction struct {
+
+ // The Region of the replica to be added.
+ //
+ // This member is required.
+ RegionName *string
+
+ noSmithyDocumentSerde
+}
+
+// Represents a replica to be created.
+type CreateReplicationGroupMemberAction struct {
+
+ // The Region where the new replica will be created.
+ //
+ // This member is required.
+ RegionName *string
+
+ // Replica-specific global secondary index settings.
+ GlobalSecondaryIndexes []ReplicaGlobalSecondaryIndex
+
+ // The KMS key that should be used for KMS encryption in the new replica. To
+ // specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias
+ // ARN. Note that you should only provide this parameter if the key is different
+ // from the default DynamoDB KMS key alias/aws/dynamodb .
+ KMSMasterKeyId *string
+
+ // The maximum on-demand throughput settings for the specified replica table being
+ // created. You can only modify MaxReadRequestUnits , because you can't modify
+ // MaxWriteRequestUnits for individual replica tables.
+ OnDemandThroughputOverride *OnDemandThroughputOverride
+
+ // Replica-specific provisioned throughput. If not specified, uses the source
+ // table's provisioned throughput settings.
+ ProvisionedThroughputOverride *ProvisionedThroughputOverride
+
+ // Replica-specific table class. If not specified, uses the source table's table
+ // class.
+ TableClassOverride TableClass
+
+ noSmithyDocumentSerde
+}
+
+// Processing options for the CSV file being imported.
+type CsvOptions struct {
+
+ // The delimiter used for separating items in the CSV file being imported.
+ Delimiter *string
+
+ // List of the headers used to specify a common header for all source CSV files
+ // being imported. If this field is specified, then the first line of each CSV file
+ // is treated as data instead of the header. If this field is not specified, then
+ // the first line of each CSV file is treated as the header. 
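+ //
+ // An illustrative sketch (the column names and delimiter are assumptions): a
+ // headerless import of two key columns might use
+ //
+ //	CsvOptions{Delimiter: aws.String(","), HeaderList: []string{"PK", "SK"}}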
+ HeaderList []string
+
+ noSmithyDocumentSerde
+}
+
+// Represents a request to perform a DeleteItem operation.
+type Delete struct {
+
+ // The primary key of the item to be deleted. Each element consists of an
+ // attribute name and a value for that attribute.
+ //
+ // This member is required.
+ Key map[string]AttributeValue
+
+ // Name of the table in which the item to be deleted resides. You can also provide
+ // the Amazon Resource Name (ARN) of the table in this parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ // A condition that must be satisfied in order for a conditional delete to succeed.
+ ConditionExpression *string
+
+ // One or more substitution tokens for attribute names in an expression.
+ ExpressionAttributeNames map[string]string
+
+ // One or more values that can be substituted in an expression.
+ ExpressionAttributeValues map[string]AttributeValue
+
+ // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the Delete
+ // condition fails. For ReturnValuesOnConditionCheckFailure , the valid values are:
+ // NONE and ALL_OLD.
+ ReturnValuesOnConditionCheckFailure ReturnValuesOnConditionCheckFailure
+
+ noSmithyDocumentSerde
+}
+
+// Represents a global secondary index to be deleted from an existing table.
+type DeleteGlobalSecondaryIndexAction struct {
+
+ // The name of the global secondary index to be deleted.
+ //
+ // This member is required.
+ IndexName *string
+
+ noSmithyDocumentSerde
+}
+
+// Specifies the action to remove a witness Region from a MRSC global table. You
+// cannot delete a single witness from a MRSC global table - you must delete both a
+// replica and the witness together. The deletion of both a witness and replica
+// converts the remaining replica to a single-Region DynamoDB table.
+type DeleteGlobalTableWitnessGroupMemberAction struct {
+
+ // The witness Region name to be removed from the MRSC global table.
+ //
+ // This member is required.
+ RegionName *string
+
+ noSmithyDocumentSerde
+}
+
+// Represents a replica to be removed.
+type DeleteReplicaAction struct {
+
+ // The Region of the replica to be removed.
+ //
+ // This member is required.
+ RegionName *string
+
+ noSmithyDocumentSerde
+}
+
+// Represents a replica to be deleted.
+type DeleteReplicationGroupMemberAction struct {
+
+ // The Region where the replica exists.
+ //
+ // This member is required.
+ RegionName *string
+
+ noSmithyDocumentSerde
+}
+
+// Represents a request to perform a DeleteItem operation on an item.
+type DeleteRequest struct {
+
+ // A map of attribute name to attribute values, representing the primary key of
+ // the item to delete. All of the table's primary key attributes must be specified,
+ // and their data types must match those of the table's key schema.
+ //
+ // This member is required.
+ Key map[string]AttributeValue
+
+ noSmithyDocumentSerde
+}
+
+// Enables setting the configuration for Kinesis Streaming.
+type EnableKinesisStreamingConfiguration struct {
+
+ // Toggle for the precision of Kinesis data stream timestamp. The values are
+ // either MILLISECOND or MICROSECOND .
+ ApproximateCreationDateTimePrecision ApproximateCreationDateTimePrecision
+
+ noSmithyDocumentSerde
+}
+
+// Details about an endpoint.
+type Endpoint struct {
+
+ // IP address of the endpoint.
+ //
+ // This member is required.
+ Address *string
+
+ // Endpoint cache time to live (TTL) value.
+ //
+ // This member is required. 
+ CachePeriodInMinutes int64 + + noSmithyDocumentSerde +} + +// Represents a condition to be compared with an attribute value. This condition +// can be used with DeleteItem , PutItem , or UpdateItem operations; if the +// comparison evaluates to true, the operation succeeds; if not, the operation +// fails. You can use ExpectedAttributeValue in one of two different ways: +// +// - Use AttributeValueList to specify one or more values to compare against an +// attribute. Use ComparisonOperator to specify how you want to perform the +// comparison. If the comparison evaluates to true, then the conditional operation +// succeeds. +// +// - Use Value to specify a value that DynamoDB will compare against an +// attribute. If the values match, then ExpectedAttributeValue evaluates to true +// and the conditional operation succeeds. Optionally, you can also set Exists to +// false, indicating that you do not expect to find the attribute value in the +// table. In this case, the conditional operation succeeds only if the comparison +// evaluates to false. +// +// Value and Exists are incompatible with AttributeValueList and ComparisonOperator +// . Note that if you use both sets of parameters at once, DynamoDB will return a +// ValidationException exception. +type ExpectedAttributeValue struct { + + // One or more values to evaluate against the supplied attribute. The number of + // values in the list depends on the ComparisonOperator being used. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based on + // ASCII character code values. For example, a is greater than A , and a is + // greater than B . For a list of code values, see [http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters]. + // + // For Binary, DynamoDB treats each byte of the binary data as unsigned when it + // compares binary values. + // + // For information on specifying data types in JSON, see [JSON Data Format] in the Amazon DynamoDB + // Developer Guide. + // + // [http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters]: http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + // [JSON Data Format]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataFormat.html + AttributeValueList []AttributeValue + + // A comparator for evaluating attributes in the AttributeValueList . For example, + // equals, greater than, less than, etc. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | + // BEGINS_WITH | IN | BETWEEN + // + // The following are descriptions of each comparison operator. + // + // - EQ : Equal. EQ is supported for all data types, including lists and maps. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, Binary, String Set, Number Set, or Binary Set. If an item contains an + // AttributeValue element of a different type than the one provided in the + // request, the value does not match. For example, {"S":"6"} does not equal + // {"N":"6"} . Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]} . + // + // - NE : Not equal. NE is supported for all data types, including lists and maps. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // Binary, String Set, Number Set, or Binary Set. If an item contains an + // AttributeValue of a different type than the one provided in the request, the + // value does not match. 
For example, {"S":"6"} does not equal {"N":"6"} . Also, + // {"N":"6"} does not equal {"NS":["6", "2", "1"]} . + // + // - LE : Less than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]} . + // + // - LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // or Binary (not a set type). If an item contains an AttributeValue element of a + // different type than the one provided in the request, the value does not match. + // For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"} does not + // compare to {"NS":["6", "2", "1"]} . + // + // - GE : Greater than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]} . + // + // - GT : Greater than. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]} . + // + // - NOT_NULL : The attribute exists. NOT_NULL is supported for all data types, + // including lists and maps. + // + // This operator tests for the existence of an attribute, not its data type. If + // the data type of attribute " a " is null, and you evaluate it using NOT_NULL , + // the result is a Boolean true . This result is because the attribute " a " + // exists; its data type is not relevant to the NOT_NULL comparison operator. + // + // - NULL : The attribute does not exist. NULL is supported for all data types, + // including lists and maps. + // + // This operator tests for the nonexistence of an attribute, not its data type. If + // the data type of attribute " a " is null, and you evaluate it using NULL , the + // result is a Boolean false . This is because the attribute " a " exists; its + // data type is not relevant to the NULL comparison operator. + // + // - CONTAINS : Checks for a subsequence, or value in a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison is + // of type String, then the operator checks for a substring match. If the target + // attribute of the comparison is of type Binary, then the operator looks for a + // subsequence of the target that matches the input. If the target attribute of the + // comparison is a set (" SS ", " NS ", or " BS "), then the operator evaluates + // to true if it finds an exact match with any member of the set. + // + // CONTAINS is supported for lists: When evaluating " a CONTAINS b ", " a " can be + // a list; however, " b " cannot be a set, a map, or a list. 
+ // + // - NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in + // a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison is + // a String, then the operator checks for the absence of a substring match. If the + // target attribute of the comparison is Binary, then the operator checks for the + // absence of a subsequence of the target that matches the input. If the target + // attribute of the comparison is a set (" SS ", " NS ", or " BS "), then the + // operator evaluates to true if it does not find an exact match with any member of + // the set. + // + // NOT_CONTAINS is supported for lists: When evaluating " a NOT CONTAINS b ", " a " + // can be a list; however, " b " cannot be a set, a map, or a list. + // + // - BEGINS_WITH : Checks for a prefix. + // + // AttributeValueList can contain only one AttributeValue of type String or Binary + // (not a Number or a set type). The target attribute of the comparison must be of + // type String or Binary (not a Number or a set type). + // + // - IN : Checks for matching elements in a list. + // + // AttributeValueList can contain one or more AttributeValue elements of type + // String, Number, or Binary. These attributes are compared against an existing + // attribute of an item. If any elements of the input are equal to the item + // attribute, the expression evaluates to true. + // + // - BETWEEN : Greater than or equal to the first value, and less than or equal + // to the second value. + // + // AttributeValueList must contain two AttributeValue elements of the same type, + // either String, Number, or Binary (not a set type). A target attribute matches if + // the target value is greater than, or equal to, the first element and less than, + // or equal to, the second element. If an item contains an AttributeValue element + // of a different type than the one provided in the request, the value does not + // match. For example, {"S":"6"} does not compare to {"N":"6"} . Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]} + ComparisonOperator ComparisonOperator + + // Causes DynamoDB to evaluate the value before attempting a conditional operation: + // + // - If Exists is true , DynamoDB will check to see if that attribute value + // already exists in the table. If it is found, then the operation succeeds. If it + // is not found, the operation fails with a ConditionCheckFailedException . + // + // - If Exists is false , DynamoDB assumes that the attribute value does not + // exist in the table. If in fact the value does not exist, then the assumption is + // valid and the operation succeeds. If the value is found, despite the assumption + // that it does not exist, the operation fails with a + // ConditionCheckFailedException . + // + // The default setting for Exists is true . If you supply a Value all by itself, + // DynamoDB assumes the attribute exists: You don't have to set Exists to true , + // because it is implied. + // + // DynamoDB returns a ValidationException if: + // + // - Exists is true but there is no Value to check. (You expect a value to exist, + // but don't specify what that value is.) + // + // - Exists is false but you also provide a Value . (You cannot expect an + // attribute to have a value, while also expecting it not to exist.) + Exists *bool + + // Represents the data for the expected attribute. 
+ //
+ // Each attribute value is described as a name-value pair. The name is the data
+ // type, and the value is the data itself.
+ //
+ // For more information, see [Data Types] in the Amazon DynamoDB Developer Guide.
+ //
+ // [Data Types]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes
+ Value AttributeValue
+
+ noSmithyDocumentSerde
+}
+
+// Represents the properties of the exported table.
+type ExportDescription struct {
+
+ // The billable size of the table export.
+ BilledSizeBytes *int64
+
+ // The client token that was provided for the export task. A client token makes
+ // calls to ExportTableToPointInTimeInput idempotent, meaning that multiple
+ // identical calls have the same effect as one single call.
+ ClientToken *string
+
+ // The time at which the export task completed.
+ EndTime *time.Time
+
+ // The Amazon Resource Name (ARN) of the table export.
+ ExportArn *string
+
+ // The format of the exported data. Valid values for ExportFormat are DYNAMODB_JSON
+ // or ION .
+ ExportFormat ExportFormat
+
+ // The name of the manifest file for the export task.
+ ExportManifest *string
+
+ // Export can be in one of the following states: IN_PROGRESS, COMPLETED, or FAILED.
+ ExportStatus ExportStatus
+
+ // Point in time from which table data was exported.
+ ExportTime *time.Time
+
+ // The type of export that was performed. Valid values are FULL_EXPORT or
+ // INCREMENTAL_EXPORT .
+ ExportType ExportType
+
+ // Status code for the result of the failed export.
+ FailureCode *string
+
+ // Export failure reason description.
+ FailureMessage *string
+
+ // Optional object containing the parameters specific to an incremental export.
+ IncrementalExportSpecification *IncrementalExportSpecification
+
+ // The number of items exported.
+ ItemCount *int64
+
+ // The name of the Amazon S3 bucket containing the export.
+ S3Bucket *string
+
+ // The ID of the Amazon Web Services account that owns the bucket containing the
+ // export.
+ S3BucketOwner *string
+
+ // The Amazon S3 bucket prefix used as the file name and path of the exported
+ // snapshot.
+ S3Prefix *string
+
+ // Type of encryption used on the bucket where export data is stored. Valid values
+ // for S3SseAlgorithm are:
+ //
+ // - AES256 - server-side encryption with Amazon S3 managed keys
+ //
+ // - KMS - server-side encryption with KMS managed keys
+ S3SseAlgorithm S3SseAlgorithm
+
+ // The ID of the KMS managed key used to encrypt the S3 bucket where export data
+ // is stored (if applicable).
+ S3SseKmsKeyId *string
+
+ // The time at which the export task began.
+ StartTime *time.Time
+
+ // The Amazon Resource Name (ARN) of the table that was exported.
+ TableArn *string
+
+ // Unique ID of the table that was exported.
+ TableId *string
+
+ noSmithyDocumentSerde
+}
+
+// Summary information about an export task.
+type ExportSummary struct {
+
+ // The Amazon Resource Name (ARN) of the export.
+ ExportArn *string
+
+ // Export can be in one of the following states: IN_PROGRESS, COMPLETED, or FAILED.
+ ExportStatus ExportStatus
+
+ // The type of export that was performed. Valid values are FULL_EXPORT or
+ // INCREMENTAL_EXPORT .
+ ExportType ExportType
+
+ noSmithyDocumentSerde
+}
+
+// Represents a failure of a contributor insights operation.
+type FailureException struct {
+
+ // Description of the failure.
+ ExceptionDescription *string
+
+ // Exception name. 
+ ExceptionName *string + + noSmithyDocumentSerde +} + +// Specifies an item and related attribute values to retrieve in a TransactGetItem +// object. +type Get struct { + + // A map of attribute names to AttributeValue objects that specifies the primary + // key of the item to retrieve. + // + // This member is required. + Key map[string]AttributeValue + + // The name of the table from which to retrieve the specified item. You can also + // provide the Amazon Resource Name (ARN) of the table in this parameter. + // + // This member is required. + TableName *string + + // One or more substitution tokens for attribute names in the ProjectionExpression + // parameter. + ExpressionAttributeNames map[string]string + + // A string that identifies one or more attributes of the specified item to + // retrieve from the table. The attributes in the expression must be separated by + // commas. If no attribute names are specified, then all attributes of the + // specified item are returned. If any of the requested attributes are not found, + // they do not appear in the result. + ProjectionExpression *string + + noSmithyDocumentSerde +} + +// Represents the properties of a global secondary index. +type GlobalSecondaryIndex struct { + + // The name of the global secondary index. The name must be unique among all other + // indexes on this table. + // + // This member is required. + IndexName *string + + // The complete key schema for a global secondary index, which consists of one or + // more pairs of attribute names and key types: + // + // - HASH - partition key + // + // - RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. The term + // "hash attribute" derives from DynamoDB's usage of an internal hash function to + // evenly distribute data items across partitions, based on their partition key + // values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + // + // This member is required. + KeySchema []KeySchemaElement + + // Represents attributes that are copied (projected) from the table into the + // global secondary index. These are in addition to the primary key attributes and + // index key attributes, which are automatically projected. + // + // This member is required. + Projection *Projection + + // The maximum number of read and write units for the specified global secondary + // index. If you use this parameter, you must specify MaxReadRequestUnits , + // MaxWriteRequestUnits , or both. You must use either OnDemandThroughput or + // ProvisionedThroughput based on your table's capacity mode. + OnDemandThroughput *OnDemandThroughput + + // Represents the provisioned throughput settings for the specified global + // secondary index. You must use either OnDemandThroughput or ProvisionedThroughput + // based on your table's capacity mode. + // + // For current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas] in the + // Amazon DynamoDB Developer Guide. + // + // [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html + ProvisionedThroughput *ProvisionedThroughput + + // Represents the warm throughput value (in read units per second and write units + // per second) for the specified secondary index. 
If you use this parameter, you + // must specify ReadUnitsPerSecond , WriteUnitsPerSecond , or both. + WarmThroughput *WarmThroughput + + noSmithyDocumentSerde +} + +// Represents the auto scaling settings of a global secondary index for a global +// table that will be modified. +type GlobalSecondaryIndexAutoScalingUpdate struct { + + // The name of the global secondary index. + IndexName *string + + // Represents the auto scaling settings to be modified for a global table or + // global secondary index. + ProvisionedWriteCapacityAutoScalingUpdate *AutoScalingSettingsUpdate + + noSmithyDocumentSerde +} + +// Represents the properties of a global secondary index. +type GlobalSecondaryIndexDescription struct { + + // Indicates whether the index is currently backfilling. Backfilling is the + // process of reading items from the table and determining whether they can be + // added to the index. (Not all items will qualify: For example, a partition key + // cannot have any duplicate values.) If an item can be added to the index, + // DynamoDB will do so. After all items have been processed, the backfilling + // operation is complete and Backfilling is false. + // + // You can delete an index that is being created during the Backfilling phase when + // IndexStatus is set to CREATING and Backfilling is true. You can't delete the + // index that is being created when IndexStatus is set to CREATING and Backfilling + // is false. + // + // For indexes that were created during a CreateTable operation, the Backfilling + // attribute does not appear in the DescribeTable output. + Backfilling *bool + + // The Amazon Resource Name (ARN) that uniquely identifies the index. + IndexArn *string + + // The name of the global secondary index. + IndexName *string + + // The total size of the specified index, in bytes. DynamoDB updates this value + // approximately every six hours. Recent changes might not be reflected in this + // value. + IndexSizeBytes *int64 + + // The current state of the global secondary index: + // + // - CREATING - The index is being created. + // + // - UPDATING - The index is being updated. + // + // - DELETING - The index is being deleted. + // + // - ACTIVE - The index is ready for use. + IndexStatus IndexStatus + + // The number of items in the specified index. DynamoDB updates this value + // approximately every six hours. Recent changes might not be reflected in this + // value. + ItemCount *int64 + + // The complete key schema for a global secondary index, which consists of one or + // more pairs of attribute names and key types: + // + // - HASH - partition key + // + // - RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. The term + // "hash attribute" derives from DynamoDB's usage of an internal hash function to + // evenly distribute data items across partitions, based on their partition key + // values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + KeySchema []KeySchemaElement + + // The maximum number of read and write units for the specified global secondary + // index. If you use this parameter, you must specify MaxReadRequestUnits , + // MaxWriteRequestUnits , or both. + OnDemandThroughput *OnDemandThroughput + + // Represents attributes that are copied (projected) from the table into the + // global secondary index. 
These are in addition to the primary key attributes and + // index key attributes, which are automatically projected. + Projection *Projection + + // Represents the provisioned throughput settings for the specified global + // secondary index. + // + // For current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas] in the + // Amazon DynamoDB Developer Guide. + // + // [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html + ProvisionedThroughput *ProvisionedThroughputDescription + + // Represents the warm throughput value (in read units per second and write units + // per second) for the specified secondary index. + WarmThroughput *GlobalSecondaryIndexWarmThroughputDescription + + noSmithyDocumentSerde +} + +// Represents the properties of a global secondary index for the table when the +// backup was created. +type GlobalSecondaryIndexInfo struct { + + // The name of the global secondary index. + IndexName *string + + // The complete key schema for a global secondary index, which consists of one or + // more pairs of attribute names and key types: + // + // - HASH - partition key + // + // - RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. The term + // "hash attribute" derives from DynamoDB's usage of an internal hash function to + // evenly distribute data items across partitions, based on their partition key + // values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + KeySchema []KeySchemaElement + + // Sets the maximum number of read and write units for the specified on-demand + // table. If you use this parameter, you must specify MaxReadRequestUnits , + // MaxWriteRequestUnits , or both. + OnDemandThroughput *OnDemandThroughput + + // Represents attributes that are copied (projected) from the table into the + // global secondary index. These are in addition to the primary key attributes and + // index key attributes, which are automatically projected. + Projection *Projection + + // Represents the provisioned throughput settings for the specified global + // secondary index. + ProvisionedThroughput *ProvisionedThroughput + + noSmithyDocumentSerde +} + +// Represents one of the following: +// +// - A new global secondary index to be added to an existing table. +// +// - New provisioned throughput parameters for an existing global secondary +// index. +// +// - An existing global secondary index to be removed from an existing table. +type GlobalSecondaryIndexUpdate struct { + + // The parameters required for creating a global secondary index on an existing + // table: + // + // - IndexName + // + // - KeySchema + // + // - AttributeDefinitions + // + // - Projection + // + // - ProvisionedThroughput + Create *CreateGlobalSecondaryIndexAction + + // The name of an existing global secondary index to be removed. + Delete *DeleteGlobalSecondaryIndexAction + + // The name of an existing global secondary index, along with new provisioned + // throughput settings to be applied to that index. + Update *UpdateGlobalSecondaryIndexAction + + noSmithyDocumentSerde +} + +// The description of the warm throughput value on a global secondary index. 
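+//
+// As an illustrative sketch only (the DescribeTable call and the out variable
+// are assumed here, not part of this type), a caller might read these values
+// from a table description:
+//
+//	wt := out.Table.GlobalSecondaryIndexes[0].WarmThroughput
+//	if wt != nil && wt.Status == IndexStatusActive {
+//	    fmt.Println(*wt.ReadUnitsPerSecond, *wt.WriteUnitsPerSecond)
+//	}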
+type GlobalSecondaryIndexWarmThroughputDescription struct { + + // Represents warm throughput read units per second value for a global secondary + // index. + ReadUnitsPerSecond *int64 + + // Represents the warm throughput status being created or updated on a global + // secondary index. The status can only be UPDATING or ACTIVE . + Status IndexStatus + + // Represents warm throughput write units per second value for a global secondary + // index. + WriteUnitsPerSecond *int64 + + noSmithyDocumentSerde +} + +// Represents the properties of a global table. +type GlobalTable struct { + + // The global table name. + GlobalTableName *string + + // The Regions where the global table has replicas. + ReplicationGroup []Replica + + noSmithyDocumentSerde +} + +// Contains details about the global table. +type GlobalTableDescription struct { + + // The creation time of the global table. + CreationDateTime *time.Time + + // The unique identifier of the global table. + GlobalTableArn *string + + // The global table name. + GlobalTableName *string + + // The current state of the global table: + // + // - CREATING - The global table is being created. + // + // - UPDATING - The global table is being updated. + // + // - DELETING - The global table is being deleted. + // + // - ACTIVE - The global table is ready for use. + GlobalTableStatus GlobalTableStatus + + // The Regions where the global table has replicas. + ReplicationGroup []ReplicaDescription + + noSmithyDocumentSerde +} + +// Represents the settings of a global secondary index for a global table that +// will be modified. +type GlobalTableGlobalSecondaryIndexSettingsUpdate struct { + + // The name of the global secondary index. The name must be unique among all other + // indexes on this table. + // + // This member is required. + IndexName *string + + // Auto scaling settings for managing a global secondary index's write capacity + // units. + ProvisionedWriteCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate + + // The maximum number of writes consumed per second before DynamoDB returns a + // ThrottlingException. + ProvisionedWriteCapacityUnits *int64 + + noSmithyDocumentSerde +} + +// Represents the properties of a witness Region in a MRSC global table. +type GlobalTableWitnessDescription struct { + + // The name of the Amazon Web Services Region that serves as a witness for the + // MRSC global table. + RegionName *string + + // The current status of the witness Region in the MRSC global table. + WitnessStatus WitnessStatus + + noSmithyDocumentSerde +} + +// Represents one of the following: +// +// - A new witness to be added to a new global table. +// +// - An existing witness to be removed from an existing global table. +// +// You can configure one witness per MRSC global table. +type GlobalTableWitnessGroupUpdate struct { + + // Specifies a witness Region to be added to a new MRSC global table. The witness + // must be added when creating the MRSC global table. + Create *CreateGlobalTableWitnessGroupMemberAction + + // Specifies a witness Region to be removed from an existing global table. Must be + // done in conjunction with removing a replica. The deletion of both a witness and + // replica converts the remaining replica to a single-Region DynamoDB table. + Delete *DeleteGlobalTableWitnessGroupMemberAction + + noSmithyDocumentSerde +} + +// Summary information about the source file for the import. 
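+//
+// For illustration only (the ListImports call and the out variable are
+// assumed, not part of this type), a caller might scan summaries for failed
+// imports:
+//
+//	for _, s := range out.ImportSummaryList {
+//	    if s.ImportStatus == ImportStatusFailed {
+//	        log.Println(aws.ToString(s.ImportArn))
+//	    }
+//	}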
+type ImportSummary struct {
+
+	// The Amazon Resource Number (ARN) of the CloudWatch Log Group associated with
+	// this import task.
+	CloudWatchLogGroupArn *string
+
+	// The time at which this import task ended.
+	EndTime *time.Time
+
+	// The Amazon Resource Number (ARN) corresponding to the import request.
+	ImportArn *string
+
+	// The status of the import operation.
+	ImportStatus ImportStatus
+
+	// The format of the source data. Valid values are CSV , DYNAMODB_JSON or ION .
+	InputFormat InputFormat
+
+	// The path and S3 bucket of the source file that is being imported. This
+	// includes the S3Bucket (required), S3KeyPrefix (optional) and S3BucketOwner
+	// (optional if the bucket is owned by the requester).
+	S3BucketSource *S3BucketSource
+
+	// The time at which this import task began.
+	StartTime *time.Time
+
+	// The Amazon Resource Number (ARN) of the table being imported into.
+	TableArn *string
+
+	noSmithyDocumentSerde
+}
+
+// Represents the properties of the table being imported into.
+type ImportTableDescription struct {
+
+	// The client token that was provided for the import task. Reusing the client
+	// token on retry makes a call to ImportTable idempotent.
+	ClientToken *string
+
+	// The Amazon Resource Number (ARN) of the CloudWatch Log Group associated with
+	// the target table.
+	CloudWatchLogGroupArn *string
+
+	// The time at which the creation of the table associated with this import task
+	// completed.
+	EndTime *time.Time
+
+	// The number of errors that occurred while importing the source file into the
+	// target table.
+	ErrorCount int64
+
+	// The error code corresponding to the failure that the import job ran into
+	// during execution.
+	FailureCode *string
+
+	// The error message corresponding to the failure that the import job ran into
+	// during execution.
+	FailureMessage *string
+
+	// The Amazon Resource Number (ARN) corresponding to the import request.
+	ImportArn *string
+
+	// The status of the import.
+	ImportStatus ImportStatus
+
+	// The number of items successfully imported into the new table.
+	ImportedItemCount int64
+
+	// The compression options for the data that has been imported into the target
+	// table. The values are NONE, GZIP, or ZSTD.
+	InputCompressionType InputCompressionType
+
+	// The format of the source data going into the target table.
+	InputFormat InputFormat
+
+	// The format options for the data that was imported into the target table. There
+	// is one value, CsvOption.
+	InputFormatOptions *InputFormatOptions
+
+	// The total number of items processed from the source file.
+	ProcessedItemCount int64
+
+	// The total size of data processed from the source file, in bytes.
+	ProcessedSizeBytes *int64
+
+	// Values for the S3 bucket the source file is imported from. Includes bucket
+	// name (required), key prefix (optional) and bucket account owner ID (optional).
+	S3BucketSource *S3BucketSource
+
+	// The time when this import task started.
+	StartTime *time.Time
+
+	// The Amazon Resource Number (ARN) of the table being imported into.
+	TableArn *string
+
+	// The parameters for the new table that is being imported into.
+	TableCreationParameters *TableCreationParameters
+
+	// The table ID corresponding to the table created by the import table process.
+	TableId *string
+
+	noSmithyDocumentSerde
+}
+
+// Optional object containing the parameters specific to an incremental export.
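+//
+// A minimal sketch of a one-day incremental export window (the concrete times
+// are hypothetical; aws.Time is the usual pointer helper):
+//
+//	spec := IncrementalExportSpecification{
+//	    ExportFromTime: aws.Time(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)),
+//	    ExportToTime:   aws.Time(time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC)),
+//	    ExportViewType: ExportViewTypeNewAndOldImages,
+//	}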
+type IncrementalExportSpecification struct {
+
+	// Time in the past that provides the inclusive start range for the export
+	// table's data, counted in seconds from the start of the Unix epoch. The
+	// incremental export will reflect the table's state including and after this point
+	// in time.
+	ExportFromTime *time.Time
+
+	// Time in the past that provides the exclusive end range for the export table's
+	// data, counted in seconds from the start of the Unix epoch. The incremental
+	// export will reflect the table's state just prior to this point in time. If this
+	// is not provided, the latest time with data available will be used.
+	ExportToTime *time.Time
+
+	// The view type that was chosen for the export. Valid values are
+	// NEW_AND_OLD_IMAGES and NEW_IMAGES . The default value is NEW_AND_OLD_IMAGES .
+	ExportViewType ExportViewType
+
+	noSmithyDocumentSerde
+}
+
+// The format options for the data that was imported into the target table.
+// There is one value, CsvOption.
+type InputFormatOptions struct {
+
+	// The options for imported source files in CSV format. The values are Delimiter
+	// and HeaderList.
+	Csv *CsvOptions
+
+	noSmithyDocumentSerde
+}
+
+// Information about item collections, if any, that were affected by the
+// operation. ItemCollectionMetrics is only returned if the request asked for it.
+// If the table does not have any local secondary indexes, this information is not
+// returned in the response.
+type ItemCollectionMetrics struct {
+
+	// The partition key value of the item collection. This value is the same as the
+	// partition key value of the item.
+	ItemCollectionKey map[string]AttributeValue
+
+	// An estimate of item collection size, in gigabytes. This value is a two-element
+	// array containing a lower bound and an upper bound for the estimate. The estimate
+	// includes the size of all the items in the table, plus the size of all attributes
+	// projected into all of the local secondary indexes on that table. Use this
+	// estimate to measure whether a local secondary index is approaching its size
+	// limit.
+	//
+	// The estimate is subject to change over time; therefore, do not rely on the
+	// precision or accuracy of the estimate.
+	SizeEstimateRangeGB []float64
+
+	noSmithyDocumentSerde
+}
+
+// Details for the requested item.
+type ItemResponse struct {
+
+	// Map of attribute data consisting of the data type and attribute value.
+	Item map[string]AttributeValue
+
+	noSmithyDocumentSerde
+}
+
+// Represents a set of primary keys and, for each key, the attributes to retrieve
+// from the table.
+//
+// For each primary key, you must provide all of the key attributes. For example,
+// with a simple primary key, you only need to provide the partition key. For a
+// composite primary key, you must provide both the partition key and the sort key.
+type KeysAndAttributes struct {
+
+	// The primary key attribute values that define the items and the attributes
+	// associated with the items.
+	//
+	// This member is required.
+	Keys []map[string]AttributeValue
+
+	// This is a legacy parameter. Use ProjectionExpression instead. For more
+	// information, see [Legacy Conditional Parameters] in the Amazon DynamoDB Developer Guide.
+	//
+	// [Legacy Conditional Parameters]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html
+	AttributesToGet []string
+
+	// The consistency of a read operation. 
If set to true , then a strongly consistent + // read is used; otherwise, an eventually consistent read is used. + ConsistentRead *bool + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames : + // + // - To access an attribute whose name conflicts with a DynamoDB reserved word. + // + // - To create a placeholder for repeating occurrences of an attribute name in + // an expression. + // + // - To prevent special characters in an attribute name from being + // misinterpreted in an expression. + // + // Use the # character in an expression to dereference an attribute name. For + // example, consider the following attribute name: + // + // - Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be used + // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in the + // Amazon DynamoDB Developer Guide). To work around this, you could specify the + // following for ExpressionAttributeNames : + // + // - {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // - #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see [Accessing Item Attributes] in the Amazon DynamoDB + // Developer Guide. + // + // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html + // [Accessing Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html + ExpressionAttributeNames map[string]string + + // A string that identifies one or more attributes to retrieve from the table. + // These attributes can include scalars, sets, or elements of a JSON document. The + // attributes in the ProjectionExpression must be separated by commas. + // + // If no attribute names are specified, then all attributes will be returned. If + // any of the requested attributes are not found, they will not appear in the + // result. + // + // For more information, see [Accessing Item Attributes] in the Amazon DynamoDB Developer Guide. + // + // [Accessing Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html + ProjectionExpression *string + + noSmithyDocumentSerde +} + +// Represents a single element of a key schema. A key schema specifies the +// attributes that make up the primary key of a table, or the key attributes of an +// index. +// +// A KeySchemaElement represents exactly one attribute of the primary key. For +// example, a simple primary key would be represented by one KeySchemaElement (for +// the partition key). A composite primary key would require one KeySchemaElement +// for the partition key, and another KeySchemaElement for the sort key. +// +// A KeySchemaElement must be a scalar, top-level attribute (not a nested +// attribute). The data type must be one of String, Number, or Binary. The +// attribute cannot be nested within a List or a Map. +type KeySchemaElement struct { + + // The name of a key attribute. + // + // This member is required. + AttributeName *string + + // The role that this key attribute will assume: + // + // - HASH - partition key + // + // - RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. 
The term + // "hash attribute" derives from DynamoDB's usage of an internal hash function to + // evenly distribute data items across partitions, based on their partition key + // values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + // + // This member is required. + KeyType KeyType + + noSmithyDocumentSerde +} + +// Describes a Kinesis data stream destination. +type KinesisDataStreamDestination struct { + + // The precision of the Kinesis data stream timestamp. The values are either + // MILLISECOND or MICROSECOND . + ApproximateCreationDateTimePrecision ApproximateCreationDateTimePrecision + + // The current status of replication. + DestinationStatus DestinationStatus + + // The human-readable string that corresponds to the replica status. + DestinationStatusDescription *string + + // The ARN for a specific Kinesis data stream. + StreamArn *string + + noSmithyDocumentSerde +} + +// Represents the properties of a local secondary index. +type LocalSecondaryIndex struct { + + // The name of the local secondary index. The name must be unique among all other + // indexes on this table. + // + // This member is required. + IndexName *string + + // The complete key schema for the local secondary index, consisting of one or + // more pairs of attribute names and key types: + // + // - HASH - partition key + // + // - RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. The term + // "hash attribute" derives from DynamoDB's usage of an internal hash function to + // evenly distribute data items across partitions, based on their partition key + // values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + // + // This member is required. + KeySchema []KeySchemaElement + + // Represents attributes that are copied (projected) from the table into the local + // secondary index. These are in addition to the primary key attributes and index + // key attributes, which are automatically projected. + // + // This member is required. + Projection *Projection + + noSmithyDocumentSerde +} + +// Represents the properties of a local secondary index. +type LocalSecondaryIndexDescription struct { + + // The Amazon Resource Name (ARN) that uniquely identifies the index. + IndexArn *string + + // Represents the name of the local secondary index. + IndexName *string + + // The total size of the specified index, in bytes. DynamoDB updates this value + // approximately every six hours. Recent changes might not be reflected in this + // value. + IndexSizeBytes *int64 + + // The number of items in the specified index. DynamoDB updates this value + // approximately every six hours. Recent changes might not be reflected in this + // value. + ItemCount *int64 + + // The complete key schema for the local secondary index, consisting of one or + // more pairs of attribute names and key types: + // + // - HASH - partition key + // + // - RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. 
The term
+	// "hash attribute" derives from DynamoDB's usage of an internal hash function to
+	// evenly distribute data items across partitions, based on their partition key
+	// values.
+	//
+	// The sort key of an item is also known as its range attribute. The term "range
+	// attribute" derives from the way DynamoDB stores items with the same partition
+	// key physically close together, in sorted order by the sort key value.
+	KeySchema []KeySchemaElement
+
+	// Represents attributes that are copied (projected) from the table into the
+	// local secondary index. These are in addition to the primary key attributes and
+	// index key attributes, which are automatically projected.
+	Projection *Projection
+
+	noSmithyDocumentSerde
+}
+
+// Represents the properties of a local secondary index for the table when the
+// backup was created.
+type LocalSecondaryIndexInfo struct {
+
+	// Represents the name of the local secondary index.
+	IndexName *string
+
+	// The complete key schema for a local secondary index, which consists of one or
+	// more pairs of attribute names and key types:
+	//
+	//   - HASH - partition key
+	//
+	//   - RANGE - sort key
+	//
+	// The partition key of an item is also known as its hash attribute. The term
+	// "hash attribute" derives from DynamoDB's usage of an internal hash function to
+	// evenly distribute data items across partitions, based on their partition key
+	// values.
+	//
+	// The sort key of an item is also known as its range attribute. The term "range
+	// attribute" derives from the way DynamoDB stores items with the same partition
+	// key physically close together, in sorted order by the sort key value.
+	KeySchema []KeySchemaElement
+
+	// Represents attributes that are copied (projected) from the table into the
+	// local secondary index. These are in addition to the primary key attributes and
+	// index key attributes, which are automatically projected.
+	Projection *Projection
+
+	noSmithyDocumentSerde
+}
+
+// Sets the maximum number of read and write units for the specified on-demand
+// table. If you use this parameter, you must specify MaxReadRequestUnits ,
+// MaxWriteRequestUnits , or both.
+type OnDemandThroughput struct {
+
+	// Maximum number of read request units for the specified table.
+	//
+	// To specify a maximum OnDemandThroughput on your table, set the value of
+	// MaxReadRequestUnits to a number greater than or equal to 1. To remove the
+	// maximum OnDemandThroughput that is currently set on your table, set the value
+	// of MaxReadRequestUnits to -1.
+	MaxReadRequestUnits *int64
+
+	// Maximum number of write request units for the specified table.
+	//
+	// To specify a maximum OnDemandThroughput on your table, set the value of
+	// MaxWriteRequestUnits to a number greater than or equal to 1. To remove the
+	// maximum OnDemandThroughput that is currently set on your table, set the value
+	// of MaxWriteRequestUnits to -1.
+	MaxWriteRequestUnits *int64
+
+	noSmithyDocumentSerde
+}
+
+// Overrides the on-demand throughput settings for this replica table. If you
+// don't specify a value for this parameter, it uses the source table's on-demand
+// throughput settings.
+type OnDemandThroughputOverride struct {
+
+	// Maximum number of read request units for the specified replica table.
+	MaxReadRequestUnits *int64
+
+	noSmithyDocumentSerde
+}
+
+// Represents a PartiQL statement that uses parameters.
+type ParameterizedStatement struct {
+
+	// A PartiQL statement that uses parameters.
+	//
+	// This member is required.
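+	//
+	// For example (the statement and parameter values are illustrative only):
+	//
+	//	stmt := ParameterizedStatement{
+	//	    Statement:  aws.String(`SELECT * FROM "Music" WHERE Artist = ?`),
+	//	    Parameters: []AttributeValue{&AttributeValueMemberS{Value: "Acme Band"}},
+	//	}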
+ Statement *string + + // The parameter values. + Parameters []AttributeValue + + // An optional parameter that returns the item attributes for a PartiQL + // ParameterizedStatement operation that failed a condition check. + // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. + ReturnValuesOnConditionCheckFailure ReturnValuesOnConditionCheckFailure + + noSmithyDocumentSerde +} + +// The description of the point in time settings applied to the table. +type PointInTimeRecoveryDescription struct { + + // Specifies the earliest point in time you can restore your table to. You can + // restore your table to any point in time during the last 35 days. + EarliestRestorableDateTime *time.Time + + // LatestRestorableDateTime is typically 5 minutes before the current time. + LatestRestorableDateTime *time.Time + + // The current state of point in time recovery: + // + // - ENABLED - Point in time recovery is enabled. + // + // - DISABLED - Point in time recovery is disabled. + PointInTimeRecoveryStatus PointInTimeRecoveryStatus + + // The number of preceding days for which continuous backups are taken and + // maintained. Your table data is only recoverable to any point-in-time from within + // the configured recovery period. This parameter is optional. + RecoveryPeriodInDays *int32 + + noSmithyDocumentSerde +} + +// Represents the settings used to enable point in time recovery. +type PointInTimeRecoverySpecification struct { + + // Indicates whether point in time recovery is enabled (true) or disabled (false) + // on the table. + // + // This member is required. + PointInTimeRecoveryEnabled *bool + + // The number of preceding days for which continuous backups are taken and + // maintained. Your table data is only recoverable to any point-in-time from within + // the configured recovery period. This parameter is optional. If no value is + // provided, the value will default to 35. + RecoveryPeriodInDays *int32 + + noSmithyDocumentSerde +} + +// Represents attributes that are copied (projected) from the table into an index. +// These are in addition to the primary key attributes and index key attributes, +// which are automatically projected. +type Projection struct { + + // Represents the non-key attribute names which will be projected into the index. + // + // For global and local secondary indexes, the total count of NonKeyAttributes + // summed across all of the secondary indexes, must not exceed 100. If you project + // the same attribute into two different indexes, this counts as two distinct + // attributes when determining the total. This limit only applies when you specify + // the ProjectionType of INCLUDE . You still can specify the ProjectionType of ALL + // to project all attributes from the source table, even if the table has more than + // 100 attributes. + NonKeyAttributes []string + + // The set of attributes that are projected into the index: + // + // - KEYS_ONLY - Only the index and primary keys are projected into the index. + // + // - INCLUDE - In addition to the attributes described in KEYS_ONLY , the + // secondary index will include other non-key attributes that you specify. + // + // - ALL - All of the table attributes are projected into the index. + // + // When using the DynamoDB console, ALL is selected by default. 
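+	//
+	// As a brief illustration (the attribute names are hypothetical), an INCLUDE
+	// projection carrying two extra non-key attributes:
+	//
+	//	p := Projection{
+	//	    ProjectionType:   ProjectionTypeInclude,
+	//	    NonKeyAttributes: []string{"Genre", "AlbumTitle"},
+	//	}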
+ ProjectionType ProjectionType + + noSmithyDocumentSerde +} + +// Represents the provisioned throughput settings for the specified global +// secondary index. You must use ProvisionedThroughput or OnDemandThroughput based +// on your table’s capacity mode. +// +// For current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas] in the +// Amazon DynamoDB Developer Guide. +// +// [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html +type ProvisionedThroughput struct { + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException . For more information, see [Specifying Read and Write Requirements] in the + // Amazon DynamoDB Developer Guide. + // + // If read/write capacity mode is PAY_PER_REQUEST the value is set to 0. + // + // [Specifying Read and Write Requirements]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html + // + // This member is required. + ReadCapacityUnits *int64 + + // The maximum number of writes consumed per second before DynamoDB returns a + // ThrottlingException . For more information, see [Specifying Read and Write Requirements] in the Amazon DynamoDB + // Developer Guide. + // + // If read/write capacity mode is PAY_PER_REQUEST the value is set to 0. + // + // [Specifying Read and Write Requirements]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html + // + // This member is required. + WriteCapacityUnits *int64 + + noSmithyDocumentSerde +} + +// Represents the provisioned throughput settings for the table, consisting of +// read and write capacity units, along with data about increases and decreases. +type ProvisionedThroughputDescription struct { + + // The date and time of the last provisioned throughput decrease for this table. + LastDecreaseDateTime *time.Time + + // The date and time of the last provisioned throughput increase for this table. + LastIncreaseDateTime *time.Time + + // The number of provisioned throughput decreases for this table during this UTC + // calendar day. For current maximums on provisioned throughput decreases, see [Service, Account, and Table Quotas]in + // the Amazon DynamoDB Developer Guide. + // + // [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html + NumberOfDecreasesToday *int64 + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException . Eventually consistent reads require + // less effort than strongly consistent reads, so a setting of 50 ReadCapacityUnits + // per second provides 100 eventually consistent ReadCapacityUnits per second. + ReadCapacityUnits *int64 + + // The maximum number of writes consumed per second before DynamoDB returns a + // ThrottlingException . + WriteCapacityUnits *int64 + + noSmithyDocumentSerde +} + +// Replica-specific provisioned throughput settings. If not specified, uses the +// source table's provisioned throughput settings. +type ProvisionedThroughputOverride struct { + + // Replica-specific read capacity units. If not specified, uses the source table's + // read capacity settings. + ReadCapacityUnits *int64 + + noSmithyDocumentSerde +} + +// Represents a request to perform a PutItem operation. 
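+//
+// A minimal sketch of a conditional put (table name, attribute names, and
+// values are illustrative only):
+//
+//	put := Put{
+//	    TableName: aws.String("Music"),
+//	    Item: map[string]AttributeValue{
+//	        "Artist":    &AttributeValueMemberS{Value: "Acme Band"},
+//	        "SongTitle": &AttributeValueMemberS{Value: "Happy Day"},
+//	    },
+//	    ConditionExpression: aws.String("attribute_not_exists(Artist)"),
+//	}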
+type Put struct { + + // A map of attribute name to attribute values, representing the primary key of + // the item to be written by PutItem . All of the table's primary key attributes + // must be specified, and their data types must match those of the table's key + // schema. If any attributes are present in the item that are part of an index key + // schema for the table, their types must match the index key schema. + // + // This member is required. + Item map[string]AttributeValue + + // Name of the table in which to write the item. You can also provide the Amazon + // Resource Name (ARN) of the table in this parameter. + // + // This member is required. + TableName *string + + // A condition that must be satisfied in order for a conditional update to succeed. + ConditionExpression *string + + // One or more substitution tokens for attribute names in an expression. + ExpressionAttributeNames map[string]string + + // One or more values that can be substituted in an expression. + ExpressionAttributeValues map[string]AttributeValue + + // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the Put + // condition fails. For ReturnValuesOnConditionCheckFailure , the valid values are: + // NONE and ALL_OLD. + ReturnValuesOnConditionCheckFailure ReturnValuesOnConditionCheckFailure + + noSmithyDocumentSerde +} + +// Represents a request to perform a PutItem operation on an item. +type PutRequest struct { + + // A map of attribute name to attribute values, representing the primary key of an + // item to be processed by PutItem . All of the table's primary key attributes must + // be specified, and their data types must match those of the table's key schema. + // If any attributes are present in the item that are part of an index key schema + // for the table, their types must match the index key schema. + // + // This member is required. + Item map[string]AttributeValue + + noSmithyDocumentSerde +} + +// Represents the properties of a replica. +type Replica struct { + + // The Region where the replica needs to be created. + RegionName *string + + noSmithyDocumentSerde +} + +// Represents the auto scaling settings of the replica. +type ReplicaAutoScalingDescription struct { + + // Replica-specific global secondary index auto scaling settings. + GlobalSecondaryIndexes []ReplicaGlobalSecondaryIndexAutoScalingDescription + + // The Region where the replica exists. + RegionName *string + + // Represents the auto scaling settings for a global table or global secondary + // index. + ReplicaProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription + + // Represents the auto scaling settings for a global table or global secondary + // index. + ReplicaProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription + + // The current state of the replica: + // + // - CREATING - The replica is being created. + // + // - UPDATING - The replica is being updated. + // + // - DELETING - The replica is being deleted. + // + // - ACTIVE - The replica is ready for use. + ReplicaStatus ReplicaStatus + + noSmithyDocumentSerde +} + +// Represents the auto scaling settings of a replica that will be modified. +type ReplicaAutoScalingUpdate struct { + + // The Region where the replica exists. + // + // This member is required. + RegionName *string + + // Represents the auto scaling settings of global secondary indexes that will be + // modified. 
+	ReplicaGlobalSecondaryIndexUpdates []ReplicaGlobalSecondaryIndexAutoScalingUpdate
+
+	// Represents the auto scaling settings to be modified for a global table or
+	// global secondary index.
+	ReplicaProvisionedReadCapacityAutoScalingUpdate *AutoScalingSettingsUpdate
+
+	noSmithyDocumentSerde
+}
+
+// Contains the details of the replica.
+type ReplicaDescription struct {
+
+	// Replica-specific global secondary index settings.
+	GlobalSecondaryIndexes []ReplicaGlobalSecondaryIndexDescription
+
+	// The KMS key of the replica that will be used for KMS encryption.
+	KMSMasterKeyId *string
+
+	// Overrides the maximum on-demand throughput settings for the specified replica
+	// table.
+	OnDemandThroughputOverride *OnDemandThroughputOverride
+
+	// Replica-specific provisioned throughput. If not described, uses the source
+	// table's provisioned throughput settings.
+	ProvisionedThroughputOverride *ProvisionedThroughputOverride
+
+	// The name of the Region.
+	RegionName *string
+
+	// The time at which the replica was first detected as inaccessible. To determine
+	// the cause of inaccessibility, check the ReplicaStatus property.
+	ReplicaInaccessibleDateTime *time.Time
+
+	// The current state of the replica:
+	//
+	//   - CREATING - The replica is being created.
+	//
+	//   - UPDATING - The replica is being updated.
+	//
+	//   - DELETING - The replica is being deleted.
+	//
+	//   - ACTIVE - The replica is ready for use.
+	//
+	//   - REGION_DISABLED - The replica is inaccessible because the Amazon Web
+	//   Services Region has been disabled.
+	//
+	// If the Amazon Web Services Region remains inaccessible for more than 20 hours,
+	// DynamoDB will remove this replica from the replication group. The replica will
+	// not be deleted and replication will stop from and to this region.
+	//
+	//   - INACCESSIBLE_ENCRYPTION_CREDENTIALS - The KMS key used to encrypt the table
+	//   is inaccessible.
+	//
+	// If the KMS key remains inaccessible for more than 20 hours, DynamoDB will
+	// remove this replica from the replication group. The replica will not be deleted
+	// and replication will stop from and to this region.
+	ReplicaStatus ReplicaStatus
+
+	// Detailed information about the replica status.
+	ReplicaStatusDescription *string
+
+	// Specifies the progress of a Create, Update, or Delete action on the replica as
+	// a percentage.
+	ReplicaStatusPercentProgress *string
+
+	// Contains details of the table class.
+	ReplicaTableClassSummary *TableClassSummary
+
+	// Represents the warm throughput value for this replica.
+	WarmThroughput *TableWarmThroughputDescription
+
+	noSmithyDocumentSerde
+}
+
+// Represents the properties of a replica global secondary index.
+type ReplicaGlobalSecondaryIndex struct {
+
+	// The name of the global secondary index.
+	//
+	// This member is required.
+	IndexName *string
+
+	// Overrides the maximum on-demand throughput settings for the specified global
+	// secondary index in the specified replica table.
+	OnDemandThroughputOverride *OnDemandThroughputOverride
+
+	// Replica table GSI-specific provisioned throughput. If not specified, uses the
+	// source table GSI's read capacity settings.
+	ProvisionedThroughputOverride *ProvisionedThroughputOverride
+
+	noSmithyDocumentSerde
+}
+
+// Represents the auto scaling configuration for a replica global secondary index.
+type ReplicaGlobalSecondaryIndexAutoScalingDescription struct {
+
+	// The name of the global secondary index.
+ IndexName *string + + // The current state of the replica global secondary index: + // + // - CREATING - The index is being created. + // + // - UPDATING - The table/index configuration is being updated. The table/index + // remains available for data operations when UPDATING + // + // - DELETING - The index is being deleted. + // + // - ACTIVE - The index is ready for use. + IndexStatus IndexStatus + + // Represents the auto scaling settings for a global table or global secondary + // index. + ProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription + + // Represents the auto scaling settings for a global table or global secondary + // index. + ProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription + + noSmithyDocumentSerde +} + +// Represents the auto scaling settings of a global secondary index for a replica +// that will be modified. +type ReplicaGlobalSecondaryIndexAutoScalingUpdate struct { + + // The name of the global secondary index. + IndexName *string + + // Represents the auto scaling settings to be modified for a global table or + // global secondary index. + ProvisionedReadCapacityAutoScalingUpdate *AutoScalingSettingsUpdate + + noSmithyDocumentSerde +} + +// Represents the properties of a replica global secondary index. +type ReplicaGlobalSecondaryIndexDescription struct { + + // The name of the global secondary index. + IndexName *string + + // Overrides the maximum on-demand throughput for the specified global secondary + // index in the specified replica table. + OnDemandThroughputOverride *OnDemandThroughputOverride + + // If not described, uses the source table GSI's read capacity settings. + ProvisionedThroughputOverride *ProvisionedThroughputOverride + + // Represents the warm throughput of the global secondary index for this replica. + WarmThroughput *GlobalSecondaryIndexWarmThroughputDescription + + noSmithyDocumentSerde +} + +// Represents the properties of a global secondary index. +type ReplicaGlobalSecondaryIndexSettingsDescription struct { + + // The name of the global secondary index. The name must be unique among all other + // indexes on this table. + // + // This member is required. + IndexName *string + + // The current status of the global secondary index: + // + // - CREATING - The global secondary index is being created. + // + // - UPDATING - The global secondary index is being updated. + // + // - DELETING - The global secondary index is being deleted. + // + // - ACTIVE - The global secondary index is ready for use. + IndexStatus IndexStatus + + // Auto scaling settings for a global secondary index replica's read capacity + // units. + ProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException . + ProvisionedReadCapacityUnits *int64 + + // Auto scaling settings for a global secondary index replica's write capacity + // units. + ProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription + + // The maximum number of writes consumed per second before DynamoDB returns a + // ThrottlingException . + ProvisionedWriteCapacityUnits *int64 + + noSmithyDocumentSerde +} + +// Represents the settings of a global secondary index for a global table that +// will be modified. +type ReplicaGlobalSecondaryIndexSettingsUpdate struct { + + // The name of the global secondary index. The name must be unique among all other + // indexes on this table. 
+ // + // This member is required. + IndexName *string + + // Auto scaling settings for managing a global secondary index replica's read + // capacity units. + ProvisionedReadCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException . + ProvisionedReadCapacityUnits *int64 + + noSmithyDocumentSerde +} + +// Represents the properties of a replica. +type ReplicaSettingsDescription struct { + + // The Region name of the replica. + // + // This member is required. + RegionName *string + + // The read/write capacity mode of the replica. + ReplicaBillingModeSummary *BillingModeSummary + + // Replica global secondary index settings for the global table. + ReplicaGlobalSecondaryIndexSettings []ReplicaGlobalSecondaryIndexSettingsDescription + + // Auto scaling settings for a global table replica's read capacity units. + ReplicaProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException . For more information, see [Specifying Read and Write Requirements] in the + // Amazon DynamoDB Developer Guide. + // + // [Specifying Read and Write Requirements]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput + ReplicaProvisionedReadCapacityUnits *int64 + + // Auto scaling settings for a global table replica's write capacity units. + ReplicaProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription + + // The maximum number of writes consumed per second before DynamoDB returns a + // ThrottlingException . For more information, see [Specifying Read and Write Requirements] in the Amazon DynamoDB + // Developer Guide. + // + // [Specifying Read and Write Requirements]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput + ReplicaProvisionedWriteCapacityUnits *int64 + + // The current state of the Region: + // + // - CREATING - The Region is being created. + // + // - UPDATING - The Region is being updated. + // + // - DELETING - The Region is being deleted. + // + // - ACTIVE - The Region is ready for use. + ReplicaStatus ReplicaStatus + + // Contains details of the table class. + ReplicaTableClassSummary *TableClassSummary + + noSmithyDocumentSerde +} + +// Represents the settings for a global table in a Region that will be modified. +type ReplicaSettingsUpdate struct { + + // The Region of the replica to be added. + // + // This member is required. + RegionName *string + + // Represents the settings of a global secondary index for a global table that + // will be modified. + ReplicaGlobalSecondaryIndexSettingsUpdate []ReplicaGlobalSecondaryIndexSettingsUpdate + + // Auto scaling settings for managing a global table replica's read capacity units. + ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException . For more information, see [Specifying Read and Write Requirements] in the + // Amazon DynamoDB Developer Guide. + // + // [Specifying Read and Write Requirements]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput + ReplicaProvisionedReadCapacityUnits *int64 + + // Replica-specific table class. 
If not specified, uses the source table's table
+	// class.
+	ReplicaTableClass TableClass
+
+	noSmithyDocumentSerde
+}
+
+// Represents one of the following:
+//
+//   - A new replica to be added to an existing regional table or global table.
+//   This request invokes the CreateTableReplica action in the destination Region.
+//
+//   - New parameters for an existing replica. This request invokes the UpdateTable
+//   action in the destination Region.
+//
+//   - An existing replica to be deleted. The request invokes the
+//   DeleteTableReplica action in the destination Region, deleting the replica and
+//   all of its items in the destination Region.
+//
+// When you manually remove a table or global table replica, you do not
+// automatically remove any associated scalable targets, scaling policies, or
+// CloudWatch alarms.
+type ReplicationGroupUpdate struct {
+
+	// The parameters required for creating a replica for the table.
+	Create *CreateReplicationGroupMemberAction
+
+	// The parameters required for deleting a replica for the table.
+	Delete *DeleteReplicationGroupMemberAction
+
+	// The parameters required for updating a replica for the table.
+	Update *UpdateReplicationGroupMemberAction
+
+	noSmithyDocumentSerde
+}
+
+// Represents one of the following:
+//
+//   - A new replica to be added to an existing global table.
+//
+//   - New parameters for an existing replica.
+//
+//   - An existing replica to be removed from an existing global table.
+type ReplicaUpdate struct {
+
+	// The parameters required for creating a replica on an existing global table.
+	Create *CreateReplicaAction
+
+	// The name of the existing replica to be removed.
+	Delete *DeleteReplicaAction
+
+	noSmithyDocumentSerde
+}
+
+// Contains details for the restore.
+type RestoreSummary struct {
+
+	// Point in time or source backup time.
+	//
+	// This member is required.
+	RestoreDateTime *time.Time
+
+	// Indicates if a restore is in progress or not.
+	//
+	// This member is required.
+	RestoreInProgress *bool
+
+	// The Amazon Resource Name (ARN) of the backup from which the table was restored.
+	SourceBackupArn *string
+
+	// The ARN of the source table of the backup that is being restored.
+	SourceTableArn *string
+
+	noSmithyDocumentSerde
+}
+
+// The S3 bucket that is being imported from.
+type S3BucketSource struct {
+
+	// The S3 bucket that is being imported from.
+	//
+	// This member is required.
+	S3Bucket *string
+
+	// The account number of the S3 bucket that is being imported from. If the bucket
+	// is owned by the requester, this is optional.
+	S3BucketOwner *string
+
+	// The key prefix shared by all S3 Objects that are being imported.
+	S3KeyPrefix *string
+
+	noSmithyDocumentSerde
+}
+
+// Contains the details of the table when the backup was created.
+type SourceTableDetails struct {
+
+	// Schema of the table.
+	//
+	// This member is required.
+	KeySchema []KeySchemaElement
+
+	// Read IOPS and Write IOPS on the table when the backup was created.
+	//
+	// This member is required.
+	ProvisionedThroughput *ProvisionedThroughput
+
+	// Time when the source table was created.
+	//
+	// This member is required.
+	TableCreationDateTime *time.Time
+
+	// Unique identifier for the table for which the backup was created.
+	//
+	// This member is required.
+	TableId *string
+
+	// The name of the table for which the backup was created.
+	//
+	// This member is required.
+	TableName *string
+
+	// Controls how you are charged for read and write throughput and how you manage
+	// capacity. This setting can be changed later.
+	//
+	//   - PROVISIONED - Sets the read/write capacity mode to PROVISIONED . We
+	//   recommend using PROVISIONED for predictable workloads.
+	//
+	//   - PAY_PER_REQUEST - Sets the read/write capacity mode to PAY_PER_REQUEST . We
+	//   recommend using PAY_PER_REQUEST for unpredictable workloads.
+	BillingMode BillingMode
+
+	// Number of items in the table. Note that this is an approximate value.
+	ItemCount *int64
+
+	// Sets the maximum number of read and write units for the specified on-demand
+	// table. If you use this parameter, you must specify MaxReadRequestUnits ,
+	// MaxWriteRequestUnits , or both.
+	OnDemandThroughput *OnDemandThroughput
+
+	// ARN of the table for which backup was created.
+	TableArn *string
+
+	// Size of the table in bytes. Note that this is an approximate value.
+	TableSizeBytes *int64
+
+	noSmithyDocumentSerde
+}
+
+// Contains the details of the features enabled on the table when the backup was
+// created. For example, LSIs, GSIs, streams, TTL.
+type SourceTableFeatureDetails struct {
+
+	// Represents the GSI properties for the table when the backup was created. It
+	// includes the IndexName, KeySchema, Projection, and ProvisionedThroughput for the
+	// GSIs on the table at the time of backup.
+	GlobalSecondaryIndexes []GlobalSecondaryIndexInfo
+
+	// Represents the LSI properties for the table when the backup was created. It
+	// includes the IndexName, KeySchema and Projection for the LSIs on the table at
+	// the time of backup.
+	LocalSecondaryIndexes []LocalSecondaryIndexInfo
+
+	// The description of the server-side encryption status on the table when the
+	// backup was created.
+	SSEDescription *SSEDescription
+
+	// Stream settings on the table when the backup was created.
+	StreamDescription *StreamSpecification
+
+	// Time to Live settings on the table when the backup was created.
+	TimeToLiveDescription *TimeToLiveDescription
+
+	noSmithyDocumentSerde
+}
+
+// The description of the server-side encryption status on the specified table.
+type SSEDescription struct {
+
+	// Indicates the time, in UNIX epoch date format, when DynamoDB detected that the
+	// table's KMS key was inaccessible. This attribute will automatically be cleared
+	// when DynamoDB detects that the table's KMS key is accessible again. DynamoDB
+	// will initiate the table archival process when the table's KMS key remains
+	// inaccessible for more than seven days from this date.
+	InaccessibleEncryptionDateTime *time.Time
+
+	// The KMS key ARN used for the KMS encryption.
+	KMSMasterKeyArn *string
+
+	// Server-side encryption type. The only supported value is:
+	//
+	//   - KMS - Server-side encryption that uses Key Management Service. The key is
+	//   stored in your account and is managed by KMS (KMS charges apply).
+	SSEType SSEType
+
+	// Represents the current state of server-side encryption. The only supported
+	// values are:
+	//
+	//   - ENABLED - Server-side encryption is enabled.
+	//
+	//   - UPDATING - Server-side encryption is being updated.
+	Status SSEStatus
+
+	noSmithyDocumentSerde
+}
+
+// Represents the settings used to enable server-side encryption.
+type SSESpecification struct {
+
+	// Indicates whether server-side encryption is done using an Amazon Web Services
+	// managed key or an Amazon Web Services owned key. If enabled (true), server-side
+	// encryption type is set to KMS and an Amazon Web Services managed key is used
+	// (KMS charges apply). If disabled (false) or not specified, server-side
+	// encryption is set to an Amazon Web Services owned key.
+	Enabled *bool
+
+	// The KMS key that should be used for the KMS encryption. To specify a key, use
+	// its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you
+	// should only provide this parameter if the key is different from the default
+	// DynamoDB key alias/aws/dynamodb .
+	KMSMasterKeyId *string
+
+	// Server-side encryption type. The only supported value is:
+	//
+	//   - KMS - Server-side encryption that uses Key Management Service. The key is
+	//   stored in your account and is managed by KMS (KMS charges apply).
+	SSEType SSEType
+
+	noSmithyDocumentSerde
+}
+
+// Represents the DynamoDB Streams configuration for a table in DynamoDB.
+type StreamSpecification struct {
+
+	// Indicates whether DynamoDB Streams is enabled (true) or disabled (false) on the
+	// table.
+	//
+	// This member is required.
+	StreamEnabled *bool
+
+	// When an item in the table is modified, StreamViewType determines what
+	// information is written to the stream for this table. Valid values for
+	// StreamViewType are:
+	//
+	//   - KEYS_ONLY - Only the key attributes of the modified item are written to the
+	//   stream.
+	//
+	//   - NEW_IMAGE - The entire item, as it appears after it was modified, is written
+	//   to the stream.
+	//
+	//   - OLD_IMAGE - The entire item, as it appeared before it was modified, is
+	//   written to the stream.
+	//
+	//   - NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are
+	//   written to the stream.
+	StreamViewType StreamViewType
+
+	noSmithyDocumentSerde
+}
+
+// Represents the auto scaling configuration for a global table.
+type TableAutoScalingDescription struct {
+
+	// Represents replicas of the global table.
+	Replicas []ReplicaAutoScalingDescription
+
+	// The name of the table.
+	TableName *string
+
+	// The current state of the table:
+	//
+	//   - CREATING - The table is being created.
+	//
+	//   - UPDATING - The table is being updated.
+	//
+	//   - DELETING - The table is being deleted.
+	//
+	//   - ACTIVE - The table is ready for use.
+	TableStatus TableStatus
+
+	noSmithyDocumentSerde
+}
+
+// Contains details of the table class.
+type TableClassSummary struct {
+
+	// The date and time at which the table class was last updated.
+	LastUpdateDateTime *time.Time
+
+	// The table class of the specified table. Valid values are STANDARD and
+	// STANDARD_INFREQUENT_ACCESS .
+	TableClass TableClass
+
+	noSmithyDocumentSerde
+}
+
+// The parameters for the table created as part of the import operation.
+type TableCreationParameters struct {
+
+	// The attributes of the table created as part of the import operation.
+	//
+	// This member is required.
+	AttributeDefinitions []AttributeDefinition
+
+	// The primary key and optional sort key of the table created as part of the
+	// import operation.
+	//
+	// This member is required.
+	KeySchema []KeySchemaElement
+
+	// The name of the table created as part of the import operation.
+	//
+	// This member is required.
+	TableName *string
+
+	// The billing mode for provisioning the table created as part of the import
+	// operation.
+	BillingMode BillingMode
+
+	// The Global Secondary Indexes (GSI) of the table to be created as part of the
+	// import operation.
+	GlobalSecondaryIndexes []GlobalSecondaryIndex
+
+	// Sets the maximum number of read and write units for the specified on-demand
+	// table. 
If you use this parameter, you must specify MaxReadRequestUnits , + // MaxWriteRequestUnits , or both. + OnDemandThroughput *OnDemandThroughput + + // Represents the provisioned throughput settings for the specified global + // secondary index. You must use ProvisionedThroughput or OnDemandThroughput based + // on your table’s capacity mode. + // + // For current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas] in the + // Amazon DynamoDB Developer Guide. + // + // [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html + ProvisionedThroughput *ProvisionedThroughput + + // Represents the settings used to enable server-side encryption. + SSESpecification *SSESpecification + + noSmithyDocumentSerde +} + +// Represents the properties of a table. +type TableDescription struct { + + // Contains information about the table archive. + ArchivalSummary *ArchivalSummary + + // An array of AttributeDefinition objects. Each of these objects describes one + // attribute in the table and index key schema. + // + // Each AttributeDefinition object in this array is composed of: + // + // - AttributeName - The name of the attribute. + // + // - AttributeType - The data type for the attribute. + AttributeDefinitions []AttributeDefinition + + // Contains the details for the read/write capacity mode. + BillingModeSummary *BillingModeSummary + + // The date and time when the table was created, in [UNIX epoch time] format. + // + // [UNIX epoch time]: http://www.epochconverter.com/ + CreationDateTime *time.Time + + // Indicates whether deletion protection is enabled (true) or disabled (false) on + // the table. + DeletionProtectionEnabled *bool + + // The global secondary indexes, if any, on the table. Each index is scoped to a + // given partition key value. Each element is composed of: + // + // - Backfilling - If true, then the index is currently in the backfilling phase. + // Backfilling occurs only when a new global secondary index is added to the table. + // It is the process by which DynamoDB populates the new index with data from the + // table. (This attribute does not appear for indexes that were created during a + // CreateTable operation.) + // + // You can delete an index that is being created during the Backfilling phase when + // IndexStatus is set to CREATING and Backfilling is true. You can't delete the + // index that is being created when IndexStatus is set to CREATING and + // Backfilling is false. (This attribute does not appear for indexes that were + // created during a CreateTable operation.) + // + // - IndexName - The name of the global secondary index. + // + // - IndexSizeBytes - The total size of the global secondary index, in bytes. + // DynamoDB updates this value approximately every six hours. Recent changes might + // not be reflected in this value. + // + // - IndexStatus - The current status of the global secondary index: + // + // - CREATING - The index is being created. + // + // - UPDATING - The index is being updated. + // + // - DELETING - The index is being deleted. + // + // - ACTIVE - The index is ready for use. + // + // - ItemCount - The number of items in the global secondary index. DynamoDB + // updates this value approximately every six hours. Recent changes might not be + // reflected in this value. + // + // - KeySchema - Specifies the complete index key schema. The attribute names in + // the key schema must be between 1 and 255 characters (inclusive). 
The key schema + // must begin with the same partition key as the table. + // + // - Projection - Specifies attributes that are copied (projected) from the table + // into the index. These are in addition to the primary key attributes and index + // key attributes, which are automatically projected. Each attribute specification + // is composed of: + // + // - ProjectionType - One of the following: + // + // - KEYS_ONLY - Only the index and primary keys are projected into the index. + // + // - INCLUDE - In addition to the attributes described in KEYS_ONLY , the + // secondary index will include other non-key attributes that you specify. + // + // - ALL - All of the table attributes are projected into the index. + // + // - NonKeyAttributes - A list of one or more non-key attribute names that are + // projected into the secondary index. The total count of attributes provided in + // NonKeyAttributes , summed across all of the secondary indexes, must not exceed + // 100. If you project the same attribute into two different indexes, this counts + // as two distinct attributes when determining the total. This limit only applies + // when you specify the ProjectionType of INCLUDE . You still can specify the + // ProjectionType of ALL to project all attributes from the source table, even if + // the table has more than 100 attributes. + // + // - ProvisionedThroughput - The provisioned throughput settings for the global + // secondary index, consisting of read and write capacity units, along with data + // about increases and decreases. + // + // If the table is in the DELETING state, no information about indexes will be + // returned. + GlobalSecondaryIndexes []GlobalSecondaryIndexDescription + + // Represents the version of [global tables] in use, if the table is replicated across Amazon Web + // Services Regions. + // + // [global tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html + GlobalTableVersion *string + + // The witness Region and its current status in the MRSC global table. Only one + // witness Region can be configured per MRSC global table. + GlobalTableWitnesses []GlobalTableWitnessDescription + + // The number of items in the specified table. DynamoDB updates this value + // approximately every six hours. Recent changes might not be reflected in this + // value. + ItemCount *int64 + + // The primary key structure for the table. Each KeySchemaElement consists of: + // + // - AttributeName - The name of the attribute. + // + // - KeyType - The role of the attribute: + // + // - HASH - partition key + // + // - RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. The term + // "hash attribute" derives from DynamoDB's usage of an internal hash function to + // evenly distribute data items across partitions, based on their partition key + // values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + // + // For more information about primary keys, see [Primary Key] in the Amazon DynamoDB Developer + // Guide. + // + // [Primary Key]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelPrimaryKey + KeySchema []KeySchemaElement + + // The Amazon Resource Name (ARN) that uniquely identifies the latest stream for + // this table. 
+ LatestStreamArn *string + + // A timestamp, in ISO 8601 format, for this stream. + // + // Note that LatestStreamLabel is not a unique identifier for the stream, because + // it is possible that a stream from another table might have the same timestamp. + // However, the combination of the following three elements is guaranteed to be + // unique: + // + // - Amazon Web Services customer ID + // + // - Table name + // + // - StreamLabel + LatestStreamLabel *string + + // Represents one or more local secondary indexes on the table. Each index is + // scoped to a given partition key value. Tables with one or more local secondary + // indexes are subject to an item collection size limit, where the amount of data + // within a given item collection cannot exceed 10 GB. Each element is composed of: + // + // - IndexName - The name of the local secondary index. + // + // - KeySchema - Specifies the complete index key schema. The attribute names in + // the key schema must be between 1 and 255 characters (inclusive). The key schema + // must begin with the same partition key as the table. + // + // - Projection - Specifies attributes that are copied (projected) from the table + // into the index. These are in addition to the primary key attributes and index + // key attributes, which are automatically projected. Each attribute specification + // is composed of: + // + // - ProjectionType - One of the following: + // + // - KEYS_ONLY - Only the index and primary keys are projected into the index. + // + // - INCLUDE - Only the specified table attributes are projected into the index. + // The list of projected attributes is in NonKeyAttributes . + // + // - ALL - All of the table attributes are projected into the index. + // + // - NonKeyAttributes - A list of one or more non-key attribute names that are + // projected into the secondary index. The total count of attributes provided in + // NonKeyAttributes , summed across all of the secondary indexes, must not exceed + // 100. If you project the same attribute into two different indexes, this counts + // as two distinct attributes when determining the total. This limit only applies + // when you specify the ProjectionType of INCLUDE . You still can specify the + // ProjectionType of ALL to project all attributes from the source table, even if + // the table has more than 100 attributes. + // + // - IndexSizeBytes - Represents the total size of the index, in bytes. DynamoDB + // updates this value approximately every six hours. Recent changes might not be + // reflected in this value. + // + // - ItemCount - Represents the number of items in the index. DynamoDB updates + // this value approximately every six hours. Recent changes might not be reflected + // in this value. + // + // If the table is in the DELETING state, no information about indexes will be + // returned. + LocalSecondaryIndexes []LocalSecondaryIndexDescription + + // Indicates one of the following consistency modes for a global table: + // + // - EVENTUAL : Indicates that the global table is configured for multi-Region + // eventual consistency (MREC). + // + // - STRONG : Indicates that the global table is configured for multi-Region + // strong consistency (MRSC). + // + // If you don't specify this field, the global table consistency mode defaults to + // EVENTUAL . For more information about global tables consistency modes, see [Consistency modes] in + // DynamoDB developer guide. 
+ //
+ // [Consistency modes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_HowItWorks.html#V2globaltables_HowItWorks.consistency-modes
+ MultiRegionConsistency MultiRegionConsistency
+
+ // The maximum number of read and write units for the specified on-demand table.
+ // If you use this parameter, you must specify MaxReadRequestUnits ,
+ // MaxWriteRequestUnits , or both.
+ OnDemandThroughput *OnDemandThroughput
+
+ // The provisioned throughput settings for the table, consisting of read and write
+ // capacity units, along with data about increases and decreases.
+ ProvisionedThroughput *ProvisionedThroughputDescription
+
+ // Represents replicas of the table.
+ Replicas []ReplicaDescription
+
+ // Contains details for the restore.
+ RestoreSummary *RestoreSummary
+
+ // The description of the server-side encryption status on the specified table.
+ SSEDescription *SSEDescription
+
+ // The current DynamoDB Streams configuration for the table.
+ StreamSpecification *StreamSpecification
+
+ // The Amazon Resource Name (ARN) that uniquely identifies the table.
+ TableArn *string
+
+ // Contains details of the table class.
+ TableClassSummary *TableClassSummary
+
+ // Unique identifier for the table for which the backup was created.
+ TableId *string
+
+ // The name of the table.
+ TableName *string
+
+ // The total size of the specified table, in bytes. DynamoDB updates this value
+ // approximately every six hours. Recent changes might not be reflected in this
+ // value.
+ TableSizeBytes *int64
+
+ // The current state of the table:
+ //
+ //   - CREATING - The table is being created.
+ //
+ //   - UPDATING - The table/index configuration is being updated. The table/index
+ //     remains available for data operations when UPDATING .
+ //
+ //   - DELETING - The table is being deleted.
+ //
+ //   - ACTIVE - The table is ready for use.
+ //
+ //   - INACCESSIBLE_ENCRYPTION_CREDENTIALS - The KMS key used to encrypt the table
+ //     is inaccessible. Table operations may fail due to failure to use the KMS key.
+ //     DynamoDB will initiate the table archival process when a table's KMS key remains
+ //     inaccessible for more than seven days.
+ //
+ //   - ARCHIVING - The table is being archived. Operations are not allowed until
+ //     archival is complete.
+ //
+ //   - ARCHIVED - The table has been archived. See the ArchivalReason for more
+ //     information.
+ TableStatus TableStatus
+
+ // Describes the warm throughput value of the base table.
+ WarmThroughput *TableWarmThroughputDescription
+
+ noSmithyDocumentSerde
+}
+
+// Represents the warm throughput value (in read units per second and write
+// units per second) of the base table.
+type TableWarmThroughputDescription struct {
+
+ // Represents the base table's warm throughput value in read units per second.
+ ReadUnitsPerSecond *int64
+
+ // Represents warm throughput status of the base table.
+ Status TableStatus
+
+ // Represents the base table's warm throughput value in write units per second.
+ WriteUnitsPerSecond *int64
+
+ noSmithyDocumentSerde
+}
+
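+// Editor's note: the following sketch is illustrative only and is not part of
+// the generated SDK source. It shows how a caller might read the warm
+// throughput fields above out of a DescribeTable response; the client, ctx,
+// and table name are hypothetical, and the aws and dynamodb packages are the
+// usual module imports:
+//
+//	out, err := client.DescribeTable(ctx, &dynamodb.DescribeTableInput{
+//		TableName: aws.String("orders"),
+//	})
+//	if err == nil && out.Table != nil && out.Table.WarmThroughput != nil {
+//		wt := out.Table.WarmThroughput
+//		fmt.Printf("warm read/write units: %d/%d (status %s)\n",
+//			aws.ToInt64(wt.ReadUnitsPerSecond),
+//			aws.ToInt64(wt.WriteUnitsPerSecond),
+//			wt.Status)
+//	}
+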
+// Describes a tag. A tag is a key-value pair. You can add up to 50 tags to a
+// single DynamoDB table.
+//
+// Amazon Web Services-assigned tag names and values are automatically assigned
+// the aws: prefix, which the user cannot assign. Amazon Web Services-assigned tag
+// names do not count towards the tag limit of 50. User-assigned tag names have the
+// prefix user: in the Cost Allocation Report. You cannot backdate the application
+// of a tag.
+//
+// For an overview on tagging DynamoDB resources, see [Tagging for DynamoDB] in the Amazon DynamoDB
+// Developer Guide.
+//
+// [Tagging for DynamoDB]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html
+type Tag struct {
+
+ // The key of the tag. Tag keys are case sensitive. Each DynamoDB table can only
+ // have up to one tag with the same key. If you try to add an existing tag (same
+ // key), the existing tag value will be updated to the new value.
+ //
+ // This member is required.
+ Key *string
+
+ // The value of the tag. Tag values are case-sensitive and can be null.
+ //
+ // This member is required.
+ Value *string
+
+ noSmithyDocumentSerde
+}
+
+// Represents the specific reason why a DynamoDB request was throttled and the ARN
+// of the impacted resource. This helps identify exactly what resource is being
+// throttled, what type of operation caused it, and why the throttling occurred.
+type ThrottlingReason struct {
+
+ // The reason for throttling. The throttling reason follows a specific format:
+ // ResourceType+OperationType+LimitType :
+ //
+ //   - Resource Type (What is being throttled): Table or Index
+ //
+ //   - Operation Type (What kind of operation): Read or Write
+ //
+ //   - Limit Type (Why the throttling occurred):
+ //
+ //   - ProvisionedThroughputExceeded : The request rate is exceeding the [provisioned throughput capacity]
+ //     (read or write capacity units) configured for a table or a global secondary
+ //     index (GSI) in provisioned capacity mode.
+ //
+ //   - AccountLimitExceeded : The request rate has caused a table or global
+ //     secondary index (GSI) in on-demand mode to exceed the [per-table account-level service quotas] for read/write
+ //     throughput in the current Amazon Web Services Region.
+ //
+ //   - KeyRangeThroughputExceeded : The request rate directed at a specific
+ //     partition key value has exceeded the [internal partition-level throughput limits], indicating uneven access patterns
+ //     across the table's or GSI's key space.
+ //
+ //   - MaxOnDemandThroughputExceeded : The request rate has exceeded the [configured maximum throughput limits] set for
+ //     a table or index in on-demand capacity mode.
+ //
+ // Examples of complete throttling reasons:
+ //
+ //   - TableReadProvisionedThroughputExceeded
+ //
+ //   - IndexWriteAccountLimitExceeded
+ //
+ // This helps identify exactly what resource is being throttled, what type of
+ // operation caused it, and why the throttling occurred.
+ //
+ // [provisioned throughput capacity]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html
+ // [per-table account-level service quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ServiceQuotas.html#default-limits-throughput
+ // [configured maximum throughput limits]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/on-demand-capacity-mode-max-throughput.html
+ // [internal partition-level throughput limits]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html
+ Reason *string
+
+ // The Amazon Resource Name (ARN) of the DynamoDB table or index that experienced
+ // the throttling event.
+ Resource *string
+
+ noSmithyDocumentSerde
+}
+
+// The description of the Time to Live (TTL) status on the specified table.
+type TimeToLiveDescription struct { + + // The name of the TTL attribute for items in the table. + AttributeName *string + + // The TTL status for the table. + TimeToLiveStatus TimeToLiveStatus + + noSmithyDocumentSerde +} + +// Represents the settings used to enable or disable Time to Live (TTL) for the +// specified table. +type TimeToLiveSpecification struct { + + // The name of the TTL attribute used to store the expiration time for items in + // the table. + // + // This member is required. + AttributeName *string + + // Indicates whether TTL is to be enabled (true) or disabled (false) on the table. + // + // This member is required. + Enabled *bool + + noSmithyDocumentSerde +} + +// Specifies an item to be retrieved as part of the transaction. +type TransactGetItem struct { + + // Contains the primary key that identifies the item to get, together with the + // name of the table that contains the item, and optionally the specific attributes + // of the item to retrieve. + // + // This member is required. + Get *Get + + noSmithyDocumentSerde +} + +// A list of requests that can perform update, put, delete, or check operations on +// multiple items in one or more tables atomically. +type TransactWriteItem struct { + + // A request to perform a check item operation. + ConditionCheck *ConditionCheck + + // A request to perform a DeleteItem operation. + Delete *Delete + + // A request to perform a PutItem operation. + Put *Put + + // A request to perform an UpdateItem operation. + Update *Update + + noSmithyDocumentSerde +} + +// Represents a request to perform an UpdateItem operation. +type Update struct { + + // The primary key of the item to be updated. Each element consists of an + // attribute name and a value for that attribute. + // + // This member is required. + Key map[string]AttributeValue + + // Name of the table for the UpdateItem request. You can also provide the Amazon + // Resource Name (ARN) of the table in this parameter. + // + // This member is required. + TableName *string + + // An expression that defines one or more attributes to be updated, the action to + // be performed on them, and new value(s) for them. + // + // This member is required. + UpdateExpression *string + + // A condition that must be satisfied in order for a conditional update to succeed. + ConditionExpression *string + + // One or more substitution tokens for attribute names in an expression. + ExpressionAttributeNames map[string]string + + // One or more values that can be substituted in an expression. + ExpressionAttributeValues map[string]AttributeValue + + // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the Update + // condition fails. For ReturnValuesOnConditionCheckFailure , the valid values are: + // NONE and ALL_OLD. + ReturnValuesOnConditionCheckFailure ReturnValuesOnConditionCheckFailure + + noSmithyDocumentSerde +} + +// Represents the new provisioned throughput settings to be applied to a global +// secondary index. +type UpdateGlobalSecondaryIndexAction struct { + + // The name of the global secondary index to be updated. + // + // This member is required. + IndexName *string + + // Updates the maximum number of read and write units for the specified global + // secondary index. If you use this parameter, you must specify MaxReadRequestUnits + // , MaxWriteRequestUnits , or both. + OnDemandThroughput *OnDemandThroughput + + // Represents the provisioned throughput settings for the specified global + // secondary index. 
+ // + // For current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas] in the + // Amazon DynamoDB Developer Guide. + // + // [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html + ProvisionedThroughput *ProvisionedThroughput + + // Represents the warm throughput value of the new provisioned throughput settings + // to be applied to a global secondary index. + WarmThroughput *WarmThroughput + + noSmithyDocumentSerde +} + +// Enables updating the configuration for Kinesis Streaming. +type UpdateKinesisStreamingConfiguration struct { + + // Enables updating the precision of Kinesis data stream timestamp. + ApproximateCreationDateTimePrecision ApproximateCreationDateTimePrecision + + noSmithyDocumentSerde +} + +// Represents a replica to be modified. +type UpdateReplicationGroupMemberAction struct { + + // The Region where the replica exists. + // + // This member is required. + RegionName *string + + // Replica-specific global secondary index settings. + GlobalSecondaryIndexes []ReplicaGlobalSecondaryIndex + + // The KMS key of the replica that should be used for KMS encryption. To specify a + // key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note + // that you should only provide this parameter if the key is different from the + // default DynamoDB KMS key alias/aws/dynamodb . + KMSMasterKeyId *string + + // Overrides the maximum on-demand throughput for the replica table. + OnDemandThroughputOverride *OnDemandThroughputOverride + + // Replica-specific provisioned throughput. If not specified, uses the source + // table's provisioned throughput settings. + ProvisionedThroughputOverride *ProvisionedThroughputOverride + + // Replica-specific table class. If not specified, uses the source table's table + // class. + TableClassOverride TableClass + + noSmithyDocumentSerde +} + +// Provides visibility into the number of read and write operations your table or +// secondary index can instantaneously support. The settings can be modified using +// the UpdateTable operation to meet the throughput requirements of an upcoming +// peak event. +type WarmThroughput struct { + + // Represents the number of read operations your base table can instantaneously + // support. + ReadUnitsPerSecond *int64 + + // Represents the number of write operations your base table can instantaneously + // support. + WriteUnitsPerSecond *int64 + + noSmithyDocumentSerde +} + +// Represents an operation to perform - either DeleteItem or PutItem . You can only +// request one of these operations, not both, in a single WriteRequest . If you do +// need to perform both of these operations, you need to provide two separate +// WriteRequest objects. +type WriteRequest struct { + + // A request to perform a DeleteItem operation. + DeleteRequest *DeleteRequest + + // A request to perform a PutItem operation. + PutRequest *PutRequest + + noSmithyDocumentSerde +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +// UnknownUnionMember is returned when a union member is returned over the wire, +// but has an unknown tag. 
+type UnknownUnionMember struct { + Tag string + Value []byte + + noSmithyDocumentSerde +} + +func (*UnknownUnionMember) isAttributeValue() {} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/validators.go new file mode 100644 index 0000000000..781ef41cf3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/validators.go @@ -0,0 +1,3558 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpBatchExecuteStatement struct { +} + +func (*validateOpBatchExecuteStatement) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpBatchExecuteStatement) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*BatchExecuteStatementInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpBatchExecuteStatementInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpBatchGetItem struct { +} + +func (*validateOpBatchGetItem) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpBatchGetItem) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*BatchGetItemInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpBatchGetItemInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpBatchWriteItem struct { +} + +func (*validateOpBatchWriteItem) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpBatchWriteItem) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*BatchWriteItemInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpBatchWriteItemInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateBackup struct { +} + +func (*validateOpCreateBackup) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateBackup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateBackupInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateBackupInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateGlobalTable struct { +} + +func (*validateOpCreateGlobalTable) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateGlobalTable) HandleInitialize(ctx context.Context, in middleware.InitializeInput, 
next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateGlobalTableInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateGlobalTableInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateTable struct { +} + +func (*validateOpCreateTable) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateTable) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateTableInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateTableInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBackup struct { +} + +func (*validateOpDeleteBackup) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBackup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBackupInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBackupInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteItem struct { +} + +func (*validateOpDeleteItem) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteItem) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteItemInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteItemInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteResourcePolicy struct { +} + +func (*validateOpDeleteResourcePolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteResourcePolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteResourcePolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteResourcePolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteTable struct { +} + +func (*validateOpDeleteTable) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteTable) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteTableInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteTableInput(input); err != 
nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDescribeBackup struct { +} + +func (*validateOpDescribeBackup) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeBackup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeBackupInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeBackupInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDescribeContinuousBackups struct { +} + +func (*validateOpDescribeContinuousBackups) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeContinuousBackups) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeContinuousBackupsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeContinuousBackupsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDescribeContributorInsights struct { +} + +func (*validateOpDescribeContributorInsights) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeContributorInsights) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeContributorInsightsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeContributorInsightsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDescribeExport struct { +} + +func (*validateOpDescribeExport) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeExport) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeExportInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeExportInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDescribeGlobalTable struct { +} + +func (*validateOpDescribeGlobalTable) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeGlobalTable) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeGlobalTableInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeGlobalTableInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDescribeGlobalTableSettings struct { +} + +func 
(*validateOpDescribeGlobalTableSettings) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeGlobalTableSettings) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeGlobalTableSettingsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeGlobalTableSettingsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDescribeImport struct { +} + +func (*validateOpDescribeImport) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeImport) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeImportInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeImportInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDescribeKinesisStreamingDestination struct { +} + +func (*validateOpDescribeKinesisStreamingDestination) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeKinesisStreamingDestination) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeKinesisStreamingDestinationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeKinesisStreamingDestinationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDescribeTable struct { +} + +func (*validateOpDescribeTable) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeTable) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeTableInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeTableInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDescribeTableReplicaAutoScaling struct { +} + +func (*validateOpDescribeTableReplicaAutoScaling) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeTableReplicaAutoScaling) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeTableReplicaAutoScalingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeTableReplicaAutoScalingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDescribeTimeToLive struct { +} + +func (*validateOpDescribeTimeToLive) ID() string { + return 
"OperationInputValidation" +} + +func (m *validateOpDescribeTimeToLive) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeTimeToLiveInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeTimeToLiveInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDisableKinesisStreamingDestination struct { +} + +func (*validateOpDisableKinesisStreamingDestination) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDisableKinesisStreamingDestination) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DisableKinesisStreamingDestinationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDisableKinesisStreamingDestinationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpEnableKinesisStreamingDestination struct { +} + +func (*validateOpEnableKinesisStreamingDestination) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpEnableKinesisStreamingDestination) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*EnableKinesisStreamingDestinationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpEnableKinesisStreamingDestinationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpExecuteStatement struct { +} + +func (*validateOpExecuteStatement) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpExecuteStatement) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ExecuteStatementInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpExecuteStatementInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpExecuteTransaction struct { +} + +func (*validateOpExecuteTransaction) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpExecuteTransaction) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ExecuteTransactionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpExecuteTransactionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpExportTableToPointInTime struct { +} + +func (*validateOpExportTableToPointInTime) ID() string { + return "OperationInputValidation" +} + +func (m 
*validateOpExportTableToPointInTime) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ExportTableToPointInTimeInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpExportTableToPointInTimeInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetItem struct { +} + +func (*validateOpGetItem) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetItem) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetItemInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetItemInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetResourcePolicy struct { +} + +func (*validateOpGetResourcePolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetResourcePolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetResourcePolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetResourcePolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpImportTable struct { +} + +func (*validateOpImportTable) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpImportTable) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ImportTableInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpImportTableInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListTagsOfResource struct { +} + +func (*validateOpListTagsOfResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListTagsOfResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListTagsOfResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListTagsOfResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutItem struct { +} + +func (*validateOpPutItem) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutItem) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutItemInput) + if !ok { + return out, metadata, 
fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutItemInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutResourcePolicy struct { +} + +func (*validateOpPutResourcePolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutResourcePolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutResourcePolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutResourcePolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpQuery struct { +} + +func (*validateOpQuery) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpQuery) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*QueryInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpQueryInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpRestoreTableFromBackup struct { +} + +func (*validateOpRestoreTableFromBackup) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpRestoreTableFromBackup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*RestoreTableFromBackupInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpRestoreTableFromBackupInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpRestoreTableToPointInTime struct { +} + +func (*validateOpRestoreTableToPointInTime) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpRestoreTableToPointInTime) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*RestoreTableToPointInTimeInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpRestoreTableToPointInTimeInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpScan struct { +} + +func (*validateOpScan) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpScan) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ScanInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpScanInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpTagResource struct { +} + +func (*validateOpTagResource) ID() string { + 
return "OperationInputValidation" +} + +func (m *validateOpTagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*TagResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpTagResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpTransactGetItems struct { +} + +func (*validateOpTransactGetItems) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpTransactGetItems) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*TransactGetItemsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpTransactGetItemsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpTransactWriteItems struct { +} + +func (*validateOpTransactWriteItems) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpTransactWriteItems) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*TransactWriteItemsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpTransactWriteItemsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUntagResource struct { +} + +func (*validateOpUntagResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUntagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UntagResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUntagResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateContinuousBackups struct { +} + +func (*validateOpUpdateContinuousBackups) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateContinuousBackups) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateContinuousBackupsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateContinuousBackupsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateContributorInsights struct { +} + +func (*validateOpUpdateContributorInsights) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateContributorInsights) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out 
middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateContributorInsightsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateContributorInsightsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateGlobalTable struct { +} + +func (*validateOpUpdateGlobalTable) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateGlobalTable) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateGlobalTableInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateGlobalTableInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateGlobalTableSettings struct { +} + +func (*validateOpUpdateGlobalTableSettings) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateGlobalTableSettings) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateGlobalTableSettingsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateGlobalTableSettingsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateItem struct { +} + +func (*validateOpUpdateItem) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateItem) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateItemInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateItemInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateKinesisStreamingDestination struct { +} + +func (*validateOpUpdateKinesisStreamingDestination) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateKinesisStreamingDestination) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateKinesisStreamingDestinationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateKinesisStreamingDestinationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateTable struct { +} + +func (*validateOpUpdateTable) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateTable) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateTableInput) + if !ok { + return 
out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateTableInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateTableReplicaAutoScaling struct { +} + +func (*validateOpUpdateTableReplicaAutoScaling) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateTableReplicaAutoScaling) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateTableReplicaAutoScalingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateTableReplicaAutoScalingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateTimeToLive struct { +} + +func (*validateOpUpdateTimeToLive) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateTimeToLive) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateTimeToLiveInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateTimeToLiveInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpBatchExecuteStatementValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpBatchExecuteStatement{}, middleware.After) +} + +func addOpBatchGetItemValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpBatchGetItem{}, middleware.After) +} + +func addOpBatchWriteItemValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpBatchWriteItem{}, middleware.After) +} + +func addOpCreateBackupValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateBackup{}, middleware.After) +} + +func addOpCreateGlobalTableValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateGlobalTable{}, middleware.After) +} + +func addOpCreateTableValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateTable{}, middleware.After) +} + +func addOpDeleteBackupValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBackup{}, middleware.After) +} + +func addOpDeleteItemValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteItem{}, middleware.After) +} + +func addOpDeleteResourcePolicyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteResourcePolicy{}, middleware.After) +} + +func addOpDeleteTableValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteTable{}, middleware.After) +} + +func addOpDescribeBackupValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeBackup{}, middleware.After) +} + +func addOpDescribeContinuousBackupsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeContinuousBackups{}, middleware.After) +} 
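+
+// Editor's note: the following sketch is illustrative only and is not part of
+// the generated code. Every addOp*ValidationMiddleware helper in this file
+// registers one of the validateOp* handlers defined above on the stack's
+// Initialize step, so required-member violations surface on the client before
+// any request is sent. The cfg, ctx, and input values here are hypothetical:
+//
+//	client := dynamodb.NewFromConfig(cfg)
+//	_, err := client.PutItem(ctx, &dynamodb.PutItemInput{}) // TableName and Item missing
+//	// err is non-nil without any network call: the PutItem validator
+//	// reports both missing members via a smithy.InvalidParamsError.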
+ +func addOpDescribeContributorInsightsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeContributorInsights{}, middleware.After) +} + +func addOpDescribeExportValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeExport{}, middleware.After) +} + +func addOpDescribeGlobalTableValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeGlobalTable{}, middleware.After) +} + +func addOpDescribeGlobalTableSettingsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeGlobalTableSettings{}, middleware.After) +} + +func addOpDescribeImportValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeImport{}, middleware.After) +} + +func addOpDescribeKinesisStreamingDestinationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeKinesisStreamingDestination{}, middleware.After) +} + +func addOpDescribeTableValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeTable{}, middleware.After) +} + +func addOpDescribeTableReplicaAutoScalingValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeTableReplicaAutoScaling{}, middleware.After) +} + +func addOpDescribeTimeToLiveValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeTimeToLive{}, middleware.After) +} + +func addOpDisableKinesisStreamingDestinationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDisableKinesisStreamingDestination{}, middleware.After) +} + +func addOpEnableKinesisStreamingDestinationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpEnableKinesisStreamingDestination{}, middleware.After) +} + +func addOpExecuteStatementValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpExecuteStatement{}, middleware.After) +} + +func addOpExecuteTransactionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpExecuteTransaction{}, middleware.After) +} + +func addOpExportTableToPointInTimeValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpExportTableToPointInTime{}, middleware.After) +} + +func addOpGetItemValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetItem{}, middleware.After) +} + +func addOpGetResourcePolicyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetResourcePolicy{}, middleware.After) +} + +func addOpImportTableValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpImportTable{}, middleware.After) +} + +func addOpListTagsOfResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListTagsOfResource{}, middleware.After) +} + +func addOpPutItemValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutItem{}, middleware.After) +} + +func addOpPutResourcePolicyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutResourcePolicy{}, middleware.After) +} + +func addOpQueryValidationMiddleware(stack *middleware.Stack) error { + return 
stack.Initialize.Add(&validateOpQuery{}, middleware.After) +} + +func addOpRestoreTableFromBackupValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpRestoreTableFromBackup{}, middleware.After) +} + +func addOpRestoreTableToPointInTimeValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpRestoreTableToPointInTime{}, middleware.After) +} + +func addOpScanValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpScan{}, middleware.After) +} + +func addOpTagResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpTagResource{}, middleware.After) +} + +func addOpTransactGetItemsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpTransactGetItems{}, middleware.After) +} + +func addOpTransactWriteItemsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpTransactWriteItems{}, middleware.After) +} + +func addOpUntagResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUntagResource{}, middleware.After) +} + +func addOpUpdateContinuousBackupsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateContinuousBackups{}, middleware.After) +} + +func addOpUpdateContributorInsightsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateContributorInsights{}, middleware.After) +} + +func addOpUpdateGlobalTableValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateGlobalTable{}, middleware.After) +} + +func addOpUpdateGlobalTableSettingsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateGlobalTableSettings{}, middleware.After) +} + +func addOpUpdateItemValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateItem{}, middleware.After) +} + +func addOpUpdateKinesisStreamingDestinationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateKinesisStreamingDestination{}, middleware.After) +} + +func addOpUpdateTableValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateTable{}, middleware.After) +} + +func addOpUpdateTableReplicaAutoScalingValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateTableReplicaAutoScaling{}, middleware.After) +} + +func addOpUpdateTimeToLiveValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateTimeToLive{}, middleware.After) +} + +func validateAttributeDefinition(v *types.AttributeDefinition) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AttributeDefinition"} + if v.AttributeName == nil { + invalidParams.Add(smithy.NewErrParamRequired("AttributeName")) + } + if len(v.AttributeType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("AttributeType")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateAttributeDefinitions(v []types.AttributeDefinition) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AttributeDefinitions"} + for i := range v { + if err := validateAttributeDefinition(&v[i]); err != nil { + 
invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateAutoScalingPolicyUpdate(v *types.AutoScalingPolicyUpdate) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AutoScalingPolicyUpdate"} + if v.TargetTrackingScalingPolicyConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("TargetTrackingScalingPolicyConfiguration")) + } else if v.TargetTrackingScalingPolicyConfiguration != nil { + if err := validateAutoScalingTargetTrackingScalingPolicyConfigurationUpdate(v.TargetTrackingScalingPolicyConfiguration); err != nil { + invalidParams.AddNested("TargetTrackingScalingPolicyConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateAutoScalingSettingsUpdate(v *types.AutoScalingSettingsUpdate) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AutoScalingSettingsUpdate"} + if v.ScalingPolicyUpdate != nil { + if err := validateAutoScalingPolicyUpdate(v.ScalingPolicyUpdate); err != nil { + invalidParams.AddNested("ScalingPolicyUpdate", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateAutoScalingTargetTrackingScalingPolicyConfigurationUpdate(v *types.AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AutoScalingTargetTrackingScalingPolicyConfigurationUpdate"} + if v.TargetValue == nil { + invalidParams.Add(smithy.NewErrParamRequired("TargetValue")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateBatchGetRequestMap(v map[string]types.KeysAndAttributes) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "BatchGetRequestMap"} + for key := range v { + value := v[key] + if err := validateKeysAndAttributes(&value); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%q]", key), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateBatchStatementRequest(v *types.BatchStatementRequest) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "BatchStatementRequest"} + if v.Statement == nil { + invalidParams.Add(smithy.NewErrParamRequired("Statement")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateBatchWriteItemRequestMap(v map[string][]types.WriteRequest) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "BatchWriteItemRequestMap"} + for key := range v { + if err := validateWriteRequests(v[key]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%q]", key), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateCondition(v *types.Condition) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Condition"} + if len(v.ComparisonOperator) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("ComparisonOperator")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func 
validateConditionCheck(v *types.ConditionCheck) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ConditionCheck"} + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if v.ConditionExpression == nil { + invalidParams.Add(smithy.NewErrParamRequired("ConditionExpression")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateCreateGlobalSecondaryIndexAction(v *types.CreateGlobalSecondaryIndexAction) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateGlobalSecondaryIndexAction"} + if v.IndexName == nil { + invalidParams.Add(smithy.NewErrParamRequired("IndexName")) + } + if v.KeySchema == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeySchema")) + } else if v.KeySchema != nil { + if err := validateKeySchema(v.KeySchema); err != nil { + invalidParams.AddNested("KeySchema", err.(smithy.InvalidParamsError)) + } + } + if v.Projection == nil { + invalidParams.Add(smithy.NewErrParamRequired("Projection")) + } + if v.ProvisionedThroughput != nil { + if err := validateProvisionedThroughput(v.ProvisionedThroughput); err != nil { + invalidParams.AddNested("ProvisionedThroughput", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateCreateGlobalTableWitnessGroupMemberAction(v *types.CreateGlobalTableWitnessGroupMemberAction) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateGlobalTableWitnessGroupMemberAction"} + if v.RegionName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RegionName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateCreateReplicaAction(v *types.CreateReplicaAction) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateReplicaAction"} + if v.RegionName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RegionName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateCreateReplicationGroupMemberAction(v *types.CreateReplicationGroupMemberAction) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateReplicationGroupMemberAction"} + if v.RegionName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RegionName")) + } + if v.GlobalSecondaryIndexes != nil { + if err := validateReplicaGlobalSecondaryIndexList(v.GlobalSecondaryIndexes); err != nil { + invalidParams.AddNested("GlobalSecondaryIndexes", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateDelete(v *types.Delete) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Delete"} + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateDeleteGlobalSecondaryIndexAction(v *types.DeleteGlobalSecondaryIndexAction) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteGlobalSecondaryIndexAction"} + if v.IndexName == nil 
{ + invalidParams.Add(smithy.NewErrParamRequired("IndexName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateDeleteGlobalTableWitnessGroupMemberAction(v *types.DeleteGlobalTableWitnessGroupMemberAction) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteGlobalTableWitnessGroupMemberAction"} + if v.RegionName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RegionName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateDeleteReplicaAction(v *types.DeleteReplicaAction) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteReplicaAction"} + if v.RegionName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RegionName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateDeleteReplicationGroupMemberAction(v *types.DeleteReplicationGroupMemberAction) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteReplicationGroupMemberAction"} + if v.RegionName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RegionName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateDeleteRequest(v *types.DeleteRequest) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteRequest"} + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateFilterConditionMap(v map[string]types.Condition) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "FilterConditionMap"} + for key := range v { + value := v[key] + if err := validateCondition(&value); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%q]", key), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateGet(v *types.Get) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Get"} + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateGlobalSecondaryIndex(v *types.GlobalSecondaryIndex) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GlobalSecondaryIndex"} + if v.IndexName == nil { + invalidParams.Add(smithy.NewErrParamRequired("IndexName")) + } + if v.KeySchema == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeySchema")) + } else if v.KeySchema != nil { + if err := validateKeySchema(v.KeySchema); err != nil { + invalidParams.AddNested("KeySchema", err.(smithy.InvalidParamsError)) + } + } + if v.Projection == nil { + invalidParams.Add(smithy.NewErrParamRequired("Projection")) + } + if v.ProvisionedThroughput != nil { + if err := validateProvisionedThroughput(v.ProvisionedThroughput); err != nil { + invalidParams.AddNested("ProvisionedThroughput", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateGlobalSecondaryIndexAutoScalingUpdate(v *types.GlobalSecondaryIndexAutoScalingUpdate) error 
{ + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GlobalSecondaryIndexAutoScalingUpdate"} + if v.ProvisionedWriteCapacityAutoScalingUpdate != nil { + if err := validateAutoScalingSettingsUpdate(v.ProvisionedWriteCapacityAutoScalingUpdate); err != nil { + invalidParams.AddNested("ProvisionedWriteCapacityAutoScalingUpdate", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateGlobalSecondaryIndexAutoScalingUpdateList(v []types.GlobalSecondaryIndexAutoScalingUpdate) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GlobalSecondaryIndexAutoScalingUpdateList"} + for i := range v { + if err := validateGlobalSecondaryIndexAutoScalingUpdate(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateGlobalSecondaryIndexList(v []types.GlobalSecondaryIndex) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GlobalSecondaryIndexList"} + for i := range v { + if err := validateGlobalSecondaryIndex(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateGlobalSecondaryIndexUpdate(v *types.GlobalSecondaryIndexUpdate) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GlobalSecondaryIndexUpdate"} + if v.Update != nil { + if err := validateUpdateGlobalSecondaryIndexAction(v.Update); err != nil { + invalidParams.AddNested("Update", err.(smithy.InvalidParamsError)) + } + } + if v.Create != nil { + if err := validateCreateGlobalSecondaryIndexAction(v.Create); err != nil { + invalidParams.AddNested("Create", err.(smithy.InvalidParamsError)) + } + } + if v.Delete != nil { + if err := validateDeleteGlobalSecondaryIndexAction(v.Delete); err != nil { + invalidParams.AddNested("Delete", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateGlobalSecondaryIndexUpdateList(v []types.GlobalSecondaryIndexUpdate) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GlobalSecondaryIndexUpdateList"} + for i := range v { + if err := validateGlobalSecondaryIndexUpdate(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateGlobalTableGlobalSecondaryIndexSettingsUpdate(v *types.GlobalTableGlobalSecondaryIndexSettingsUpdate) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GlobalTableGlobalSecondaryIndexSettingsUpdate"} + if v.IndexName == nil { + invalidParams.Add(smithy.NewErrParamRequired("IndexName")) + } + if v.ProvisionedWriteCapacityAutoScalingSettingsUpdate != nil { + if err := validateAutoScalingSettingsUpdate(v.ProvisionedWriteCapacityAutoScalingSettingsUpdate); err != nil { + invalidParams.AddNested("ProvisionedWriteCapacityAutoScalingSettingsUpdate", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func 
validateGlobalTableGlobalSecondaryIndexSettingsUpdateList(v []types.GlobalTableGlobalSecondaryIndexSettingsUpdate) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GlobalTableGlobalSecondaryIndexSettingsUpdateList"} + for i := range v { + if err := validateGlobalTableGlobalSecondaryIndexSettingsUpdate(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateGlobalTableWitnessGroupUpdate(v *types.GlobalTableWitnessGroupUpdate) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GlobalTableWitnessGroupUpdate"} + if v.Create != nil { + if err := validateCreateGlobalTableWitnessGroupMemberAction(v.Create); err != nil { + invalidParams.AddNested("Create", err.(smithy.InvalidParamsError)) + } + } + if v.Delete != nil { + if err := validateDeleteGlobalTableWitnessGroupMemberAction(v.Delete); err != nil { + invalidParams.AddNested("Delete", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateGlobalTableWitnessGroupUpdateList(v []types.GlobalTableWitnessGroupUpdate) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GlobalTableWitnessGroupUpdateList"} + for i := range v { + if err := validateGlobalTableWitnessGroupUpdate(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateKeyConditions(v map[string]types.Condition) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "KeyConditions"} + for key := range v { + value := v[key] + if err := validateCondition(&value); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%q]", key), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateKeysAndAttributes(v *types.KeysAndAttributes) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "KeysAndAttributes"} + if v.Keys == nil { + invalidParams.Add(smithy.NewErrParamRequired("Keys")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateKeySchema(v []types.KeySchemaElement) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "KeySchema"} + for i := range v { + if err := validateKeySchemaElement(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateKeySchemaElement(v *types.KeySchemaElement) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "KeySchemaElement"} + if v.AttributeName == nil { + invalidParams.Add(smithy.NewErrParamRequired("AttributeName")) + } + if len(v.KeyType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("KeyType")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateLocalSecondaryIndex(v *types.LocalSecondaryIndex) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "LocalSecondaryIndex"} + if 
v.IndexName == nil { + invalidParams.Add(smithy.NewErrParamRequired("IndexName")) + } + if v.KeySchema == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeySchema")) + } else if v.KeySchema != nil { + if err := validateKeySchema(v.KeySchema); err != nil { + invalidParams.AddNested("KeySchema", err.(smithy.InvalidParamsError)) + } + } + if v.Projection == nil { + invalidParams.Add(smithy.NewErrParamRequired("Projection")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateLocalSecondaryIndexList(v []types.LocalSecondaryIndex) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "LocalSecondaryIndexList"} + for i := range v { + if err := validateLocalSecondaryIndex(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateParameterizedStatement(v *types.ParameterizedStatement) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ParameterizedStatement"} + if v.Statement == nil { + invalidParams.Add(smithy.NewErrParamRequired("Statement")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateParameterizedStatements(v []types.ParameterizedStatement) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ParameterizedStatements"} + for i := range v { + if err := validateParameterizedStatement(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validatePartiQLBatchRequest(v []types.BatchStatementRequest) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PartiQLBatchRequest"} + for i := range v { + if err := validateBatchStatementRequest(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validatePointInTimeRecoverySpecification(v *types.PointInTimeRecoverySpecification) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PointInTimeRecoverySpecification"} + if v.PointInTimeRecoveryEnabled == nil { + invalidParams.Add(smithy.NewErrParamRequired("PointInTimeRecoveryEnabled")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateProvisionedThroughput(v *types.ProvisionedThroughput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ProvisionedThroughput"} + if v.ReadCapacityUnits == nil { + invalidParams.Add(smithy.NewErrParamRequired("ReadCapacityUnits")) + } + if v.WriteCapacityUnits == nil { + invalidParams.Add(smithy.NewErrParamRequired("WriteCapacityUnits")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validatePut(v *types.Put) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Put"} + if v.Item == nil { + invalidParams.Add(smithy.NewErrParamRequired("Item")) + } + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil 
+ } +} + +func validatePutRequest(v *types.PutRequest) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutRequest"} + if v.Item == nil { + invalidParams.Add(smithy.NewErrParamRequired("Item")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicaAutoScalingUpdate(v *types.ReplicaAutoScalingUpdate) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicaAutoScalingUpdate"} + if v.RegionName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RegionName")) + } + if v.ReplicaGlobalSecondaryIndexUpdates != nil { + if err := validateReplicaGlobalSecondaryIndexAutoScalingUpdateList(v.ReplicaGlobalSecondaryIndexUpdates); err != nil { + invalidParams.AddNested("ReplicaGlobalSecondaryIndexUpdates", err.(smithy.InvalidParamsError)) + } + } + if v.ReplicaProvisionedReadCapacityAutoScalingUpdate != nil { + if err := validateAutoScalingSettingsUpdate(v.ReplicaProvisionedReadCapacityAutoScalingUpdate); err != nil { + invalidParams.AddNested("ReplicaProvisionedReadCapacityAutoScalingUpdate", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicaAutoScalingUpdateList(v []types.ReplicaAutoScalingUpdate) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicaAutoScalingUpdateList"} + for i := range v { + if err := validateReplicaAutoScalingUpdate(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicaGlobalSecondaryIndex(v *types.ReplicaGlobalSecondaryIndex) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicaGlobalSecondaryIndex"} + if v.IndexName == nil { + invalidParams.Add(smithy.NewErrParamRequired("IndexName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicaGlobalSecondaryIndexAutoScalingUpdate(v *types.ReplicaGlobalSecondaryIndexAutoScalingUpdate) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicaGlobalSecondaryIndexAutoScalingUpdate"} + if v.ProvisionedReadCapacityAutoScalingUpdate != nil { + if err := validateAutoScalingSettingsUpdate(v.ProvisionedReadCapacityAutoScalingUpdate); err != nil { + invalidParams.AddNested("ProvisionedReadCapacityAutoScalingUpdate", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicaGlobalSecondaryIndexAutoScalingUpdateList(v []types.ReplicaGlobalSecondaryIndexAutoScalingUpdate) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicaGlobalSecondaryIndexAutoScalingUpdateList"} + for i := range v { + if err := validateReplicaGlobalSecondaryIndexAutoScalingUpdate(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicaGlobalSecondaryIndexList(v []types.ReplicaGlobalSecondaryIndex) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicaGlobalSecondaryIndexList"} + for i := range v { + if err := 
validateReplicaGlobalSecondaryIndex(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicaGlobalSecondaryIndexSettingsUpdate(v *types.ReplicaGlobalSecondaryIndexSettingsUpdate) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicaGlobalSecondaryIndexSettingsUpdate"} + if v.IndexName == nil { + invalidParams.Add(smithy.NewErrParamRequired("IndexName")) + } + if v.ProvisionedReadCapacityAutoScalingSettingsUpdate != nil { + if err := validateAutoScalingSettingsUpdate(v.ProvisionedReadCapacityAutoScalingSettingsUpdate); err != nil { + invalidParams.AddNested("ProvisionedReadCapacityAutoScalingSettingsUpdate", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicaGlobalSecondaryIndexSettingsUpdateList(v []types.ReplicaGlobalSecondaryIndexSettingsUpdate) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicaGlobalSecondaryIndexSettingsUpdateList"} + for i := range v { + if err := validateReplicaGlobalSecondaryIndexSettingsUpdate(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicaSettingsUpdate(v *types.ReplicaSettingsUpdate) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicaSettingsUpdate"} + if v.RegionName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RegionName")) + } + if v.ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate != nil { + if err := validateAutoScalingSettingsUpdate(v.ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate); err != nil { + invalidParams.AddNested("ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate", err.(smithy.InvalidParamsError)) + } + } + if v.ReplicaGlobalSecondaryIndexSettingsUpdate != nil { + if err := validateReplicaGlobalSecondaryIndexSettingsUpdateList(v.ReplicaGlobalSecondaryIndexSettingsUpdate); err != nil { + invalidParams.AddNested("ReplicaGlobalSecondaryIndexSettingsUpdate", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicaSettingsUpdateList(v []types.ReplicaSettingsUpdate) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicaSettingsUpdateList"} + for i := range v { + if err := validateReplicaSettingsUpdate(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicationGroupUpdate(v *types.ReplicationGroupUpdate) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicationGroupUpdate"} + if v.Create != nil { + if err := validateCreateReplicationGroupMemberAction(v.Create); err != nil { + invalidParams.AddNested("Create", err.(smithy.InvalidParamsError)) + } + } + if v.Update != nil { + if err := validateUpdateReplicationGroupMemberAction(v.Update); err != nil { + invalidParams.AddNested("Update", err.(smithy.InvalidParamsError)) + } + } + if v.Delete != nil { + if err := 
validateDeleteReplicationGroupMemberAction(v.Delete); err != nil { + invalidParams.AddNested("Delete", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicationGroupUpdateList(v []types.ReplicationGroupUpdate) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicationGroupUpdateList"} + for i := range v { + if err := validateReplicationGroupUpdate(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicaUpdate(v *types.ReplicaUpdate) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicaUpdate"} + if v.Create != nil { + if err := validateCreateReplicaAction(v.Create); err != nil { + invalidParams.AddNested("Create", err.(smithy.InvalidParamsError)) + } + } + if v.Delete != nil { + if err := validateDeleteReplicaAction(v.Delete); err != nil { + invalidParams.AddNested("Delete", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicaUpdateList(v []types.ReplicaUpdate) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicaUpdateList"} + for i := range v { + if err := validateReplicaUpdate(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateS3BucketSource(v *types.S3BucketSource) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "S3BucketSource"} + if v.S3Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("S3Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateStreamSpecification(v *types.StreamSpecification) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "StreamSpecification"} + if v.StreamEnabled == nil { + invalidParams.Add(smithy.NewErrParamRequired("StreamEnabled")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTableCreationParameters(v *types.TableCreationParameters) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TableCreationParameters"} + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if v.AttributeDefinitions == nil { + invalidParams.Add(smithy.NewErrParamRequired("AttributeDefinitions")) + } else if v.AttributeDefinitions != nil { + if err := validateAttributeDefinitions(v.AttributeDefinitions); err != nil { + invalidParams.AddNested("AttributeDefinitions", err.(smithy.InvalidParamsError)) + } + } + if v.KeySchema == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeySchema")) + } else if v.KeySchema != nil { + if err := validateKeySchema(v.KeySchema); err != nil { + invalidParams.AddNested("KeySchema", err.(smithy.InvalidParamsError)) + } + } + if v.ProvisionedThroughput != nil { + if err := validateProvisionedThroughput(v.ProvisionedThroughput); err != nil { + invalidParams.AddNested("ProvisionedThroughput", err.(smithy.InvalidParamsError)) + } + } + if v.GlobalSecondaryIndexes != nil { + if err := 
validateGlobalSecondaryIndexList(v.GlobalSecondaryIndexes); err != nil { + invalidParams.AddNested("GlobalSecondaryIndexes", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTag(v *types.Tag) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Tag"} + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.Value == nil { + invalidParams.Add(smithy.NewErrParamRequired("Value")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTagList(v []types.Tag) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TagList"} + for i := range v { + if err := validateTag(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTimeToLiveSpecification(v *types.TimeToLiveSpecification) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TimeToLiveSpecification"} + if v.Enabled == nil { + invalidParams.Add(smithy.NewErrParamRequired("Enabled")) + } + if v.AttributeName == nil { + invalidParams.Add(smithy.NewErrParamRequired("AttributeName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTransactGetItem(v *types.TransactGetItem) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TransactGetItem"} + if v.Get == nil { + invalidParams.Add(smithy.NewErrParamRequired("Get")) + } else if v.Get != nil { + if err := validateGet(v.Get); err != nil { + invalidParams.AddNested("Get", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTransactGetItemList(v []types.TransactGetItem) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TransactGetItemList"} + for i := range v { + if err := validateTransactGetItem(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTransactWriteItem(v *types.TransactWriteItem) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TransactWriteItem"} + if v.ConditionCheck != nil { + if err := validateConditionCheck(v.ConditionCheck); err != nil { + invalidParams.AddNested("ConditionCheck", err.(smithy.InvalidParamsError)) + } + } + if v.Put != nil { + if err := validatePut(v.Put); err != nil { + invalidParams.AddNested("Put", err.(smithy.InvalidParamsError)) + } + } + if v.Delete != nil { + if err := validateDelete(v.Delete); err != nil { + invalidParams.AddNested("Delete", err.(smithy.InvalidParamsError)) + } + } + if v.Update != nil { + if err := validateUpdate(v.Update); err != nil { + invalidParams.AddNested("Update", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTransactWriteItemList(v []types.TransactWriteItem) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TransactWriteItemList"} + for i := range v { + if err := validateTransactWriteItem(&v[i]); err != nil { 
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateUpdate(v *types.Update) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Update"} + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.UpdateExpression == nil { + invalidParams.Add(smithy.NewErrParamRequired("UpdateExpression")) + } + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateUpdateGlobalSecondaryIndexAction(v *types.UpdateGlobalSecondaryIndexAction) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateGlobalSecondaryIndexAction"} + if v.IndexName == nil { + invalidParams.Add(smithy.NewErrParamRequired("IndexName")) + } + if v.ProvisionedThroughput != nil { + if err := validateProvisionedThroughput(v.ProvisionedThroughput); err != nil { + invalidParams.AddNested("ProvisionedThroughput", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateUpdateReplicationGroupMemberAction(v *types.UpdateReplicationGroupMemberAction) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateReplicationGroupMemberAction"} + if v.RegionName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RegionName")) + } + if v.GlobalSecondaryIndexes != nil { + if err := validateReplicaGlobalSecondaryIndexList(v.GlobalSecondaryIndexes); err != nil { + invalidParams.AddNested("GlobalSecondaryIndexes", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateWriteRequest(v *types.WriteRequest) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "WriteRequest"} + if v.PutRequest != nil { + if err := validatePutRequest(v.PutRequest); err != nil { + invalidParams.AddNested("PutRequest", err.(smithy.InvalidParamsError)) + } + } + if v.DeleteRequest != nil { + if err := validateDeleteRequest(v.DeleteRequest); err != nil { + invalidParams.AddNested("DeleteRequest", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateWriteRequests(v []types.WriteRequest) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "WriteRequests"} + for i := range v { + if err := validateWriteRequest(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpBatchExecuteStatementInput(v *BatchExecuteStatementInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "BatchExecuteStatementInput"} + if v.Statements == nil { + invalidParams.Add(smithy.NewErrParamRequired("Statements")) + } else if v.Statements != nil { + if err := validatePartiQLBatchRequest(v.Statements); err != nil { + invalidParams.AddNested("Statements", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpBatchGetItemInput(v *BatchGetItemInput) error 
{ + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "BatchGetItemInput"} + if v.RequestItems == nil { + invalidParams.Add(smithy.NewErrParamRequired("RequestItems")) + } else if v.RequestItems != nil { + if err := validateBatchGetRequestMap(v.RequestItems); err != nil { + invalidParams.AddNested("RequestItems", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpBatchWriteItemInput(v *BatchWriteItemInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "BatchWriteItemInput"} + if v.RequestItems == nil { + invalidParams.Add(smithy.NewErrParamRequired("RequestItems")) + } else if v.RequestItems != nil { + if err := validateBatchWriteItemRequestMap(v.RequestItems); err != nil { + invalidParams.AddNested("RequestItems", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateBackupInput(v *CreateBackupInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateBackupInput"} + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if v.BackupName == nil { + invalidParams.Add(smithy.NewErrParamRequired("BackupName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateGlobalTableInput(v *CreateGlobalTableInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateGlobalTableInput"} + if v.GlobalTableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("GlobalTableName")) + } + if v.ReplicationGroup == nil { + invalidParams.Add(smithy.NewErrParamRequired("ReplicationGroup")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateTableInput(v *CreateTableInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateTableInput"} + if v.AttributeDefinitions == nil { + invalidParams.Add(smithy.NewErrParamRequired("AttributeDefinitions")) + } else if v.AttributeDefinitions != nil { + if err := validateAttributeDefinitions(v.AttributeDefinitions); err != nil { + invalidParams.AddNested("AttributeDefinitions", err.(smithy.InvalidParamsError)) + } + } + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if v.KeySchema == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeySchema")) + } else if v.KeySchema != nil { + if err := validateKeySchema(v.KeySchema); err != nil { + invalidParams.AddNested("KeySchema", err.(smithy.InvalidParamsError)) + } + } + if v.LocalSecondaryIndexes != nil { + if err := validateLocalSecondaryIndexList(v.LocalSecondaryIndexes); err != nil { + invalidParams.AddNested("LocalSecondaryIndexes", err.(smithy.InvalidParamsError)) + } + } + if v.GlobalSecondaryIndexes != nil { + if err := validateGlobalSecondaryIndexList(v.GlobalSecondaryIndexes); err != nil { + invalidParams.AddNested("GlobalSecondaryIndexes", err.(smithy.InvalidParamsError)) + } + } + if v.ProvisionedThroughput != nil { + if err := validateProvisionedThroughput(v.ProvisionedThroughput); err != nil { + invalidParams.AddNested("ProvisionedThroughput", err.(smithy.InvalidParamsError)) + } + } + if v.StreamSpecification != nil { + if err := validateStreamSpecification(v.StreamSpecification); err != nil { + 
invalidParams.AddNested("StreamSpecification", err.(smithy.InvalidParamsError)) + } + } + if v.Tags != nil { + if err := validateTagList(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBackupInput(v *DeleteBackupInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBackupInput"} + if v.BackupArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("BackupArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteItemInput(v *DeleteItemInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteItemInput"} + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteResourcePolicyInput(v *DeleteResourcePolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteResourcePolicyInput"} + if v.ResourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteTableInput(v *DeleteTableInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteTableInput"} + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeBackupInput(v *DescribeBackupInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeBackupInput"} + if v.BackupArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("BackupArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeContinuousBackupsInput(v *DescribeContinuousBackupsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeContinuousBackupsInput"} + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeContributorInsightsInput(v *DescribeContributorInsightsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeContributorInsightsInput"} + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeExportInput(v *DescribeExportInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeExportInput"} + if v.ExportArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ExportArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeGlobalTableInput(v *DescribeGlobalTableInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeGlobalTableInput"} + if v.GlobalTableName == nil { + 
invalidParams.Add(smithy.NewErrParamRequired("GlobalTableName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeGlobalTableSettingsInput(v *DescribeGlobalTableSettingsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeGlobalTableSettingsInput"} + if v.GlobalTableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("GlobalTableName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeImportInput(v *DescribeImportInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeImportInput"} + if v.ImportArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ImportArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeKinesisStreamingDestinationInput(v *DescribeKinesisStreamingDestinationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeKinesisStreamingDestinationInput"} + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeTableInput(v *DescribeTableInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeTableInput"} + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeTableReplicaAutoScalingInput(v *DescribeTableReplicaAutoScalingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeTableReplicaAutoScalingInput"} + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeTimeToLiveInput(v *DescribeTimeToLiveInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeTimeToLiveInput"} + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDisableKinesisStreamingDestinationInput(v *DisableKinesisStreamingDestinationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DisableKinesisStreamingDestinationInput"} + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if v.StreamArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("StreamArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpEnableKinesisStreamingDestinationInput(v *EnableKinesisStreamingDestinationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "EnableKinesisStreamingDestinationInput"} + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if v.StreamArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("StreamArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpExecuteStatementInput(v *ExecuteStatementInput) error { + if 
v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ExecuteStatementInput"} + if v.Statement == nil { + invalidParams.Add(smithy.NewErrParamRequired("Statement")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpExecuteTransactionInput(v *ExecuteTransactionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ExecuteTransactionInput"} + if v.TransactStatements == nil { + invalidParams.Add(smithy.NewErrParamRequired("TransactStatements")) + } else if v.TransactStatements != nil { + if err := validateParameterizedStatements(v.TransactStatements); err != nil { + invalidParams.AddNested("TransactStatements", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpExportTableToPointInTimeInput(v *ExportTableToPointInTimeInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ExportTableToPointInTimeInput"} + if v.TableArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableArn")) + } + if v.S3Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("S3Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetItemInput(v *GetItemInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetItemInput"} + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetResourcePolicyInput(v *GetResourcePolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetResourcePolicyInput"} + if v.ResourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpImportTableInput(v *ImportTableInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ImportTableInput"} + if v.S3BucketSource == nil { + invalidParams.Add(smithy.NewErrParamRequired("S3BucketSource")) + } else if v.S3BucketSource != nil { + if err := validateS3BucketSource(v.S3BucketSource); err != nil { + invalidParams.AddNested("S3BucketSource", err.(smithy.InvalidParamsError)) + } + } + if len(v.InputFormat) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("InputFormat")) + } + if v.TableCreationParameters == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableCreationParameters")) + } else if v.TableCreationParameters != nil { + if err := validateTableCreationParameters(v.TableCreationParameters); err != nil { + invalidParams.AddNested("TableCreationParameters", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListTagsOfResourceInput(v *ListTagsOfResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListTagsOfResourceInput"} + if v.ResourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutItemInput(v *PutItemInput) error { + if v == 
nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutItemInput"} + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if v.Item == nil { + invalidParams.Add(smithy.NewErrParamRequired("Item")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutResourcePolicyInput(v *PutResourcePolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutResourcePolicyInput"} + if v.ResourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if v.Policy == nil { + invalidParams.Add(smithy.NewErrParamRequired("Policy")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpQueryInput(v *QueryInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "QueryInput"} + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if v.KeyConditions != nil { + if err := validateKeyConditions(v.KeyConditions); err != nil { + invalidParams.AddNested("KeyConditions", err.(smithy.InvalidParamsError)) + } + } + if v.QueryFilter != nil { + if err := validateFilterConditionMap(v.QueryFilter); err != nil { + invalidParams.AddNested("QueryFilter", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpRestoreTableFromBackupInput(v *RestoreTableFromBackupInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RestoreTableFromBackupInput"} + if v.TargetTableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TargetTableName")) + } + if v.BackupArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("BackupArn")) + } + if v.GlobalSecondaryIndexOverride != nil { + if err := validateGlobalSecondaryIndexList(v.GlobalSecondaryIndexOverride); err != nil { + invalidParams.AddNested("GlobalSecondaryIndexOverride", err.(smithy.InvalidParamsError)) + } + } + if v.LocalSecondaryIndexOverride != nil { + if err := validateLocalSecondaryIndexList(v.LocalSecondaryIndexOverride); err != nil { + invalidParams.AddNested("LocalSecondaryIndexOverride", err.(smithy.InvalidParamsError)) + } + } + if v.ProvisionedThroughputOverride != nil { + if err := validateProvisionedThroughput(v.ProvisionedThroughputOverride); err != nil { + invalidParams.AddNested("ProvisionedThroughputOverride", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpRestoreTableToPointInTimeInput(v *RestoreTableToPointInTimeInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RestoreTableToPointInTimeInput"} + if v.TargetTableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TargetTableName")) + } + if v.GlobalSecondaryIndexOverride != nil { + if err := validateGlobalSecondaryIndexList(v.GlobalSecondaryIndexOverride); err != nil { + invalidParams.AddNested("GlobalSecondaryIndexOverride", err.(smithy.InvalidParamsError)) + } + } + if v.LocalSecondaryIndexOverride != nil { + if err := validateLocalSecondaryIndexList(v.LocalSecondaryIndexOverride); err != nil { + invalidParams.AddNested("LocalSecondaryIndexOverride", err.(smithy.InvalidParamsError)) + } + } + if v.ProvisionedThroughputOverride != nil { + if err := 
validateProvisionedThroughput(v.ProvisionedThroughputOverride); err != nil { + invalidParams.AddNested("ProvisionedThroughputOverride", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpScanInput(v *ScanInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ScanInput"} + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if v.ScanFilter != nil { + if err := validateFilterConditionMap(v.ScanFilter); err != nil { + invalidParams.AddNested("ScanFilter", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpTagResourceInput(v *TagResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TagResourceInput"} + if v.ResourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if v.Tags == nil { + invalidParams.Add(smithy.NewErrParamRequired("Tags")) + } else if v.Tags != nil { + if err := validateTagList(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpTransactGetItemsInput(v *TransactGetItemsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TransactGetItemsInput"} + if v.TransactItems == nil { + invalidParams.Add(smithy.NewErrParamRequired("TransactItems")) + } else if v.TransactItems != nil { + if err := validateTransactGetItemList(v.TransactItems); err != nil { + invalidParams.AddNested("TransactItems", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpTransactWriteItemsInput(v *TransactWriteItemsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TransactWriteItemsInput"} + if v.TransactItems == nil { + invalidParams.Add(smithy.NewErrParamRequired("TransactItems")) + } else if v.TransactItems != nil { + if err := validateTransactWriteItemList(v.TransactItems); err != nil { + invalidParams.AddNested("TransactItems", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUntagResourceInput(v *UntagResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UntagResourceInput"} + if v.ResourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if v.TagKeys == nil { + invalidParams.Add(smithy.NewErrParamRequired("TagKeys")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateContinuousBackupsInput(v *UpdateContinuousBackupsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateContinuousBackupsInput"} + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if v.PointInTimeRecoverySpecification == nil { + invalidParams.Add(smithy.NewErrParamRequired("PointInTimeRecoverySpecification")) + } else if v.PointInTimeRecoverySpecification != nil { + if err := validatePointInTimeRecoverySpecification(v.PointInTimeRecoverySpecification); err != nil { + 
invalidParams.AddNested("PointInTimeRecoverySpecification", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateContributorInsightsInput(v *UpdateContributorInsightsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateContributorInsightsInput"} + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if len(v.ContributorInsightsAction) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("ContributorInsightsAction")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateGlobalTableInput(v *UpdateGlobalTableInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateGlobalTableInput"} + if v.GlobalTableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("GlobalTableName")) + } + if v.ReplicaUpdates == nil { + invalidParams.Add(smithy.NewErrParamRequired("ReplicaUpdates")) + } else if v.ReplicaUpdates != nil { + if err := validateReplicaUpdateList(v.ReplicaUpdates); err != nil { + invalidParams.AddNested("ReplicaUpdates", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateGlobalTableSettingsInput(v *UpdateGlobalTableSettingsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateGlobalTableSettingsInput"} + if v.GlobalTableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("GlobalTableName")) + } + if v.GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate != nil { + if err := validateAutoScalingSettingsUpdate(v.GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate); err != nil { + invalidParams.AddNested("GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate", err.(smithy.InvalidParamsError)) + } + } + if v.GlobalTableGlobalSecondaryIndexSettingsUpdate != nil { + if err := validateGlobalTableGlobalSecondaryIndexSettingsUpdateList(v.GlobalTableGlobalSecondaryIndexSettingsUpdate); err != nil { + invalidParams.AddNested("GlobalTableGlobalSecondaryIndexSettingsUpdate", err.(smithy.InvalidParamsError)) + } + } + if v.ReplicaSettingsUpdate != nil { + if err := validateReplicaSettingsUpdateList(v.ReplicaSettingsUpdate); err != nil { + invalidParams.AddNested("ReplicaSettingsUpdate", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateItemInput(v *UpdateItemInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateItemInput"} + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateKinesisStreamingDestinationInput(v *UpdateKinesisStreamingDestinationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateKinesisStreamingDestinationInput"} + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if v.StreamArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("StreamArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} 
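The generated validators in this vendored file all share one shape: collect every missing required parameter into a single smithy.InvalidParamsError, recurse into nested structures via AddNested, and return nil only when nothing was flagged, so the caller gets all client-side validation failures at once before any request is sent. The following is a minimal, self-contained sketch of that pattern; the trimmed-down PutItemInput struct is a stand-in for the real SDK type, and only smithy-go calls that appear in the diff above are used.

```go
package main

import (
	"fmt"

	"github.com/aws/smithy-go"
)

// Trimmed-down stand-in for the SDK's PutItemInput; only the required
// fields checked by the generated validator are modeled here.
type PutItemInput struct {
	TableName *string
	Item      map[string]string
}

// Mirrors the generated validateOpPutItemInput above: every missing
// required parameter is accumulated before a single error is returned.
func validateOpPutItemInput(v *PutItemInput) error {
	if v == nil {
		return nil
	}
	invalidParams := smithy.InvalidParamsError{Context: "PutItemInput"}
	if v.TableName == nil {
		invalidParams.Add(smithy.NewErrParamRequired("TableName"))
	}
	if v.Item == nil {
		invalidParams.Add(smithy.NewErrParamRequired("Item"))
	}
	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

func main() {
	// Both required parameters are absent, so both are reported in one
	// error instead of failing on the first missing field.
	err := validateOpPutItemInput(&PutItemInput{})
	fmt.Println(err)
}
```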
+ +func validateOpUpdateTableInput(v *UpdateTableInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateTableInput"} + if v.AttributeDefinitions != nil { + if err := validateAttributeDefinitions(v.AttributeDefinitions); err != nil { + invalidParams.AddNested("AttributeDefinitions", err.(smithy.InvalidParamsError)) + } + } + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if v.ProvisionedThroughput != nil { + if err := validateProvisionedThroughput(v.ProvisionedThroughput); err != nil { + invalidParams.AddNested("ProvisionedThroughput", err.(smithy.InvalidParamsError)) + } + } + if v.GlobalSecondaryIndexUpdates != nil { + if err := validateGlobalSecondaryIndexUpdateList(v.GlobalSecondaryIndexUpdates); err != nil { + invalidParams.AddNested("GlobalSecondaryIndexUpdates", err.(smithy.InvalidParamsError)) + } + } + if v.StreamSpecification != nil { + if err := validateStreamSpecification(v.StreamSpecification); err != nil { + invalidParams.AddNested("StreamSpecification", err.(smithy.InvalidParamsError)) + } + } + if v.ReplicaUpdates != nil { + if err := validateReplicationGroupUpdateList(v.ReplicaUpdates); err != nil { + invalidParams.AddNested("ReplicaUpdates", err.(smithy.InvalidParamsError)) + } + } + if v.GlobalTableWitnessUpdates != nil { + if err := validateGlobalTableWitnessGroupUpdateList(v.GlobalTableWitnessUpdates); err != nil { + invalidParams.AddNested("GlobalTableWitnessUpdates", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateTableReplicaAutoScalingInput(v *UpdateTableReplicaAutoScalingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateTableReplicaAutoScalingInput"} + if v.GlobalSecondaryIndexUpdates != nil { + if err := validateGlobalSecondaryIndexAutoScalingUpdateList(v.GlobalSecondaryIndexUpdates); err != nil { + invalidParams.AddNested("GlobalSecondaryIndexUpdates", err.(smithy.InvalidParamsError)) + } + } + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if v.ProvisionedWriteCapacityAutoScalingUpdate != nil { + if err := validateAutoScalingSettingsUpdate(v.ProvisionedWriteCapacityAutoScalingUpdate); err != nil { + invalidParams.AddNested("ProvisionedWriteCapacityAutoScalingUpdate", err.(smithy.InvalidParamsError)) + } + } + if v.ReplicaUpdates != nil { + if err := validateReplicaAutoScalingUpdateList(v.ReplicaUpdates); err != nil { + invalidParams.AddNested("ReplicaUpdates", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateTimeToLiveInput(v *UpdateTimeToLiveInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateTimeToLiveInput"} + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if v.TimeToLiveSpecification == nil { + invalidParams.Add(smithy.NewErrParamRequired("TimeToLiveSpecification")) + } else if v.TimeToLiveSpecification != nil { + if err := validateTimeToLiveSpecification(v.TimeToLiveSpecification); err != nil { + invalidParams.AddNested("TimeToLiveSpecification", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md index c81265a25d..607fc09220 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md @@ -1,3 +1,15 @@ +# v1.13.1 (2025-08-27) + +* **Dependency Update**: Update to smithy-go v1.23.0. + +# v1.13.0 (2025-07-28) + +* **Feature**: Add support for HTTP interceptors. + +# v1.12.4 (2025-06-17) + +* **Dependency Update**: Update to smithy-go v1.22.4. + # v1.12.3 (2025-02-18) * **Bug Fix**: Bump go version to 1.22 diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go index d83e533eff..7a0b6aae29 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go @@ -3,4 +3,4 @@ package acceptencoding // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.12.3" +const goModuleVersion = "1.13.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/CHANGELOG.md new file mode 100644 index 0000000000..6214ff2917 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/CHANGELOG.md @@ -0,0 +1,466 @@ +# v1.11.6 (2025-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.5 (2025-08-27) + +* **Dependency Update**: Update to smithy-go v1.23.0. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.4 (2025-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.3 (2025-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.2 (2025-08-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.1 (2025-07-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2025-07-28) + +* **Feature**: Add support for HTTP interceptors. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.18 (2025-07-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.17 (2025-06-17) + +* **Dependency Update**: Update to smithy-go v1.22.4. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.16 (2025-06-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.15 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.14 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.13 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.12 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.11 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.10 (2025-01-24) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. 
+ +# v1.10.9 (2025-01-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.8 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.7 (2024-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.6 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.5 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.4 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.3 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.2 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.1 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2024-10-04) + +* **Feature**: Add support for HTTP client metrics. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.19 (2024-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.18 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.17 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.16 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.15 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.14 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.13 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.12 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.11 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.10 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.9 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.8 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.7 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.6 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.5 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.4 (2024-03-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.3 (2024-03-04) + +* **Bug Fix**: Fix misaligned struct member used in atomic operation. This fixes a panic caused by attempting to atomically access a struct member which is not 64-bit aligned when running on 32-bit arch, due to the smaller sync.Map struct. + +# v1.9.2 (2024-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.1 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.11 (2024-01-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.10 (2023-12-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.9 (2023-12-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.8 (2023-11-30.2) + +* **Bug Fix**: Respect caller region overrides in endpoint discovery. + +# v1.8.7 (2023-11-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.6 (2023-11-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.5 (2023-11-28.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.4 (2023-11-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.3 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.2 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.1 (2023-11-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.37 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.36 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.35 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.34 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.33 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.32 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.31 (2023-07-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.30 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.29 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.28 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.27 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.26 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.25 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.24 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.23 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.22 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.21 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.20 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.19 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.18 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.17 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.16 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + 
+# v1.7.15 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.14 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.13 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.12 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.11 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.10 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.9 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.8 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.7 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.6 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.5 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2022-02-24) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.3 (2021-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2021-11-30) + +* **Bug Fix**: Fixed a race condition that caused concurrent calls relying on endpoint discovery to share the same `url.URL` reference in their operation's http.Request. + +# v1.3.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-11-06) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.2 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.0 (2021-06-25) + +* **Release**: Release new modules +* **Feature**: Module supporting endpoint-discovery across all service clients. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/LICENSE.txt new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/cache.go similarity index 69% rename from vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go rename to vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/cache.go index eeb3bc0c5a..6abd3029c0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/cache.go @@ -1,18 +1,19 @@ -package crr +package endpointdiscovery import ( + "sync" "sync/atomic" ) // EndpointCache is an LRU cache that holds a series of endpoints -// based on some key. The datastructure makes use of a read write +// based on some key. The data structure makes use of a read write // mutex to enable asynchronous use. type EndpointCache struct { // size is used to count the number of elements in the cache. // The atomic package is used to ensure this size is accurate when // using multiple goroutines. size int64 - endpoints syncMap + endpoints sync.Map endpointLimit int64 } @@ -21,11 +22,11 @@ type EndpointCache struct { func NewEndpointCache(endpointLimit int64) *EndpointCache { return &EndpointCache{ endpointLimit: endpointLimit, - endpoints: newSyncMap(), + endpoints: sync.Map{}, } } -// get is a concurrent safe get operation that will retrieve an endpoint +// Get is a concurrent safe get operation that will retrieve an endpoint // based on endpointKey. A boolean will also be returned to illustrate whether // or not the endpoint had been found. func (c *EndpointCache) get(endpointKey string) (Endpoint, bool) { @@ -44,33 +45,20 @@ func (c *EndpointCache) get(endpointKey string) (Endpoint, bool) { // Has returns if the endpoint cache contains a valid entry for the endpoint key // provided. func (c *EndpointCache) Has(endpointKey string) bool { - endpoint, ok := c.get(endpointKey) - _, found := endpoint.GetValidAddress() - - return ok && found + _, found := c.Get(endpointKey) + return found } // Get will retrieve a weighted address based off of the endpoint key. If an endpoint // should be retrieved, due to not existing or the current endpoint has expired // the Discoverer object that was passed in will attempt to discover a new endpoint // and add that to the cache.
-func (c *EndpointCache) Get(d Discoverer, endpointKey string, required bool) (WeightedAddress, error) { - var err error +func (c *EndpointCache) Get(endpointKey string) (WeightedAddress, bool) { endpoint, ok := c.get(endpointKey) - weighted, found := endpoint.GetValidAddress() - shouldGet := !ok || !found - - if required && shouldGet { - if endpoint, err = c.discover(d, endpointKey); err != nil { - return WeightedAddress{}, err - } - - weighted, _ = endpoint.GetValidAddress() - } else if shouldGet { - go c.discover(d, endpointKey) + if !ok { + return WeightedAddress{}, false } - - return weighted, nil + return endpoint.GetValidAddress() } // Add is a concurrent safe operation that will allow new endpoints to be added @@ -84,12 +72,13 @@ func (c *EndpointCache) Add(endpoint Endpoint) { return } } - c.endpoints.Store(endpoint.Key, endpoint) size := atomic.AddInt64(&c.size, 1) if size > 0 && size > c.endpointLimit { c.deleteRandomKey() } + + c.endpoints.Store(endpoint.Key, endpoint) } // deleteRandomKey will delete a random key from the cache. If @@ -107,16 +96,3 @@ func (c *EndpointCache) deleteRandomKey() bool { return found } - -// discover will get and store and endpoint using the Discoverer. -func (c *EndpointCache) discover(d Discoverer, endpointKey string) (Endpoint, error) { - endpoint, err := d.Discover() - if err != nil { - return Endpoint{}, err - } - - endpoint.Key = endpointKey - c.Add(endpoint) - - return endpoint, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/doc.go new file mode 100644 index 0000000000..36a16a7553 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/doc.go @@ -0,0 +1,33 @@ +/* +Package endpointdiscovery provides a feature implemented in the AWS SDK for Go V2 that +allows a client to fetch a valid endpoint to serve an API request. Discovered +endpoints are stored in an internal thread-safe cache to reduce the number +of calls made to fetch the endpoint. + +Endpoint discovery stores endpoints by associating them with a generated cache key. +The cache key is built using the service-modeled sdkId and any service-defined input +identifiers provided by the customer. + +Endpoint cache keys follow the grammar: + + key = sdkId.identifiers + + identifiers = map[string]string + +The endpoint discovery cache implementation is internal. Clients resolve the +cache size to 10 entries. Each entry may contain multiple host addresses as +returned by the service. + +Each discovered endpoint has a TTL associated with it, and endpoints are evicted from +the cache lazily, i.e. when a client tries to retrieve an endpoint but finds an +expired entry instead. + +The endpoint discovery feature can be turned on by setting the +`AWS_ENABLE_ENDPOINT_DISCOVERY` env variable to TRUE. + +By default, the feature is set to AUTO, indicating that operations which require +endpoint discovery always use it. To turn the feature off completely, set the +value to FALSE. Similar configuration rules apply for the shared +config file, where the key is `endpoint_discovery_enabled`.
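Since the doc comment above describes two ways to opt in (the environment variable and per-client configuration), here is a minimal sketch of both against the v2 DynamoDB client. The aws.EndpointDiscoveryEnabled constant appears in the middleware below; the EndpointDiscovery field on dynamodb.Options is an assumption based on the generated v2 service clients, not something stated in this diff.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	// Per-client opt-in; the EndpointDiscovery option on dynamodb.Options
	// is assumed here based on the generated v2 service clients.
	client := dynamodb.NewFromConfig(cfg, func(o *dynamodb.Options) {
		o.EndpointDiscovery.EnableEndpointDiscovery = aws.EndpointDiscoveryEnabled
	})
	_ = client

	// Alternatively, as the doc comment above states, export
	// AWS_ENABLE_ENDPOINT_DISCOVERY=TRUE (or set endpoint_discovery_enabled
	// in the shared config file) and leave the client options untouched.
}
```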
+*/ +package endpointdiscovery diff --git a/vendor/github.com/aws/aws-sdk-go/aws/crr/endpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/endpoint.go similarity index 68% rename from vendor/github.com/aws/aws-sdk-go/aws/crr/endpoint.go rename to vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/endpoint.go index 2b088bdbc7..5fa06f2aea 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/crr/endpoint.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/endpoint.go @@ -1,12 +1,8 @@ -package crr +package endpointdiscovery import ( "net/url" - "sort" - "strings" "time" - - "github.com/aws/aws-sdk-go/aws" ) // Endpoint represents an endpoint used in endpoint discovery. @@ -55,8 +51,6 @@ func (e *Endpoint) GetValidAddress() (WeightedAddress, bool) { we := e.Addresses[i] if we.HasExpired() { - e.Addresses = append(e.Addresses[:i], e.Addresses[i+1:]...) - i-- continue } @@ -86,38 +80,6 @@ func (e *Endpoint) Prune() bool { return true } -// Discoverer is an interface used to discovery which endpoint hit. This -// allows for specifics about what parameters need to be used to be contained -// in the Discoverer implementor. -type Discoverer interface { - Discover() (Endpoint, error) -} - -// BuildEndpointKey will sort the keys in alphabetical order and then retrieve -// the values in that order. Those values are then concatenated together to form -// the endpoint key. -func BuildEndpointKey(params map[string]*string) string { - keys := make([]string, len(params)) - i := 0 - - for k := range params { - keys[i] = k - i++ - } - sort.Strings(keys) - - values := make([]string, len(params)) - for i, k := range keys { - if params[k] == nil { - continue - } - - values[i] = aws.StringValue(params[k]) - } - - return strings.Join(values, ".") -} - func cloneURL(u *url.URL) (clone *url.URL) { clone = &url.URL{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/go_module_metadata.go new file mode 100644 index 0000000000..b83027a6e1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package endpointdiscovery + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.11.6" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/middleware.go new file mode 100644 index 0000000000..c6b073d21f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/middleware.go @@ -0,0 +1,102 @@ +package endpointdiscovery + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" +) + +// DiscoverEndpointOptions are optionals used with DiscoverEndpoint operation. +type DiscoverEndpointOptions struct { + + // EndpointResolverUsedForDiscovery is the endpoint resolver used to + // resolve an endpoint for discovery api call. 
+ EndpointResolverUsedForDiscovery interface{} + + // DisableHTTPS will disable tls for endpoint discovery call and + // subsequent discovered endpoint if service did not return an + // endpoint scheme. + DisableHTTPS bool + + // Logger to log warnings or debug statements. + Logger logging.Logger +} + +// DiscoverEndpoint is a finalize step middleware used to discover endpoint +// for an API operation. +type DiscoverEndpoint struct { + + // Options provides optional settings used with + // Discover Endpoint operation. + Options []func(*DiscoverEndpointOptions) + + // DiscoverOperation represents the endpoint discovery operation that + // returns an Endpoint or error. + DiscoverOperation func(ctx context.Context, region string, options ...func(*DiscoverEndpointOptions)) (WeightedAddress, error) + + // EndpointDiscoveryEnableState represents the customer configuration for endpoint + // discovery feature. + EndpointDiscoveryEnableState aws.EndpointDiscoveryEnableState + + // EndpointDiscoveryRequired states if an operation requires to perform + // endpoint discovery. + EndpointDiscoveryRequired bool + + // The client region + Region string +} + +// ID represents the middleware identifier +func (*DiscoverEndpoint) ID() string { + return "DiscoverEndpoint" +} + +// HandleFinalize performs endpoint discovery and updates the request host with +// the result. +// +// The resolved host from this procedure MUST override that of modeled endpoint +// resolution and middleware should be ordered accordingly. +func (d *DiscoverEndpoint) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) (out middleware.FinalizeOutput, metadata middleware.Metadata, err error) { + if d.EndpointDiscoveryEnableState == aws.EndpointDiscoveryDisabled { + return next.HandleFinalize(ctx, in) + } + + if !d.EndpointDiscoveryRequired && d.EndpointDiscoveryEnableState != aws.EndpointDiscoveryEnabled { + return next.HandleFinalize(ctx, in) + } + + if es := awsmiddleware.GetEndpointSource(ctx); es == aws.EndpointSourceCustom { + if d.EndpointDiscoveryEnableState == aws.EndpointDiscoveryEnabled { + return middleware.FinalizeOutput{}, middleware.Metadata{}, + fmt.Errorf("Invalid configuration: endpoint discovery is enabled, but a custom endpoint is provided") + } + + return next.HandleFinalize(ctx, in) + } + + weightedAddress, err := d.DiscoverOperation(ctx, d.Region, d.Options...) + if err != nil { + return middleware.FinalizeOutput{}, middleware.Metadata{}, err + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return middleware.FinalizeOutput{}, middleware.Metadata{}, + fmt.Errorf("expected request to be of type *smithyhttp.Request, got %T", in.Request) + } + + if weightedAddress.URL != nil { + // we only want the host, normal endpoint resolution can include path/query + req.URL.Host = weightedAddress.URL.Host + } + + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md index 2b5ceb4b51..869246098e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -1,3 +1,21 @@ +# v1.13.0 (2025-07-28) + +* **Feature**: Add support for HTTP interceptors. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.18 (2025-07-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.17 (2025-06-17) + +* **Dependency Update**: Update to smithy-go v1.22.4. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.16 (2025-06-10) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.12.15 (2025-02-27) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go index a165a100f8..beae329a8f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -3,4 +3,4 @@ package presignedurl // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.12.15" +const goModuleVersion = "1.13.0" diff --git a/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map.go b/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map.go deleted file mode 100644 index f7b65ac013..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map.go +++ /dev/null @@ -1,30 +0,0 @@ -//go:build go1.9 -// +build go1.9 - -package crr - -import ( - "sync" -) - -type syncMap sync.Map - -func newSyncMap() syncMap { - return syncMap{} -} - -func (m *syncMap) Load(key interface{}) (interface{}, bool) { - return (*sync.Map)(m).Load(key) -} - -func (m *syncMap) Store(key interface{}, value interface{}) { - (*sync.Map)(m).Store(key, value) -} - -func (m *syncMap) Delete(key interface{}) { - (*sync.Map)(m).Delete(key) -} - -func (m *syncMap) Range(f func(interface{}, interface{}) bool) { - (*sync.Map)(m).Range(f) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map_1_8.go deleted file mode 100644 index eb4f6aca2f..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map_1_8.go +++ /dev/null @@ -1,49 +0,0 @@ -//go:build !go1.9 -// +build !go1.9 - -package crr - -import ( - "sync" -) - -type syncMap struct { - container map[interface{}]interface{} - lock sync.RWMutex -} - -func newSyncMap() syncMap { - return syncMap{ - container: map[interface{}]interface{}{}, - } -} - -func (m *syncMap) Load(key interface{}) (interface{}, bool) { - m.lock.RLock() - defer m.lock.RUnlock() - - v, ok := m.container[key] - return v, ok -} - -func (m *syncMap) Store(key interface{}, value interface{}) { - m.lock.Lock() - defer m.lock.Unlock() - - m.container[key] = value -} - -func (m *syncMap) Delete(key interface{}) { - m.lock.Lock() - defer m.lock.Unlock() - - delete(m.container, key) -} - -func (m *syncMap) Range(f func(interface{}, interface{}) bool) { - for k, v := range m.container { - if !f(k, v) { - return - } - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go deleted file mode 100644 index 5cae6505d7..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go +++ /dev/null @@ -1,29214 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
- -package dynamodb - -import ( - "fmt" - "net/url" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/crr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" -) - -const opBatchExecuteStatement = "BatchExecuteStatement" - -// BatchExecuteStatementRequest generates a "aws/request.Request" representing the -// client's request for the BatchExecuteStatement operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See BatchExecuteStatement for more information on using the BatchExecuteStatement -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the BatchExecuteStatementRequest method. -// req, resp := client.BatchExecuteStatementRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchExecuteStatement -func (c *DynamoDB) BatchExecuteStatementRequest(input *BatchExecuteStatementInput) (req *request.Request, output *BatchExecuteStatementOutput) { - op := &request.Operation{ - Name: opBatchExecuteStatement, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &BatchExecuteStatementInput{} - } - - output = &BatchExecuteStatementOutput{} - req = c.newRequest(op, input, output) - return -} - -// BatchExecuteStatement API operation for Amazon DynamoDB. -// -// This operation allows you to perform batch reads or writes on data stored -// in DynamoDB, using PartiQL. Each read statement in a BatchExecuteStatement -// must specify an equality condition on all key attributes. This enforces that -// each SELECT statement in a batch returns at most a single item. For more -// information, see Running batch operations with PartiQL for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ql-reference.multiplestatements.batching.html). -// -// The entire batch must consist of either read statements or write statements, -// you cannot mix both in one batch. -// -// A HTTP 200 response does not mean that all statements in the BatchExecuteStatement -// succeeded. Error details for individual statements can be found under the -// Error (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BatchStatementResponse.html#DDB-Type-BatchStatementResponse-Error) -// field of the BatchStatementResponse for each statement. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation BatchExecuteStatement for usage and error information. -// -// Returned Error Types: -// -// - RequestLimitExceeded -// Throughput exceeds the current throughput quota for your account. Please -// contact Amazon Web Services Support (https://aws.amazon.com/support) to request -// a quota increase. 
-// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchExecuteStatement -func (c *DynamoDB) BatchExecuteStatement(input *BatchExecuteStatementInput) (*BatchExecuteStatementOutput, error) { - req, out := c.BatchExecuteStatementRequest(input) - return out, req.Send() -} - -// BatchExecuteStatementWithContext is the same as BatchExecuteStatement with the addition of -// the ability to pass a context and additional request options. -// -// See BatchExecuteStatement for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) BatchExecuteStatementWithContext(ctx aws.Context, input *BatchExecuteStatementInput, opts ...request.Option) (*BatchExecuteStatementOutput, error) { - req, out := c.BatchExecuteStatementRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opBatchGetItem = "BatchGetItem" - -// BatchGetItemRequest generates a "aws/request.Request" representing the -// client's request for the BatchGetItem operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See BatchGetItem for more information on using the BatchGetItem -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the BatchGetItemRequest method. -// req, resp := client.BatchGetItemRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchGetItem -func (c *DynamoDB) BatchGetItemRequest(input *BatchGetItemInput) (req *request.Request, output *BatchGetItemOutput) { - op := &request.Operation{ - Name: opBatchGetItem, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"RequestItems"}, - OutputTokens: []string{"UnprocessedKeys"}, - LimitToken: "", - TruncationToken: "", - }, - } - - if input == nil { - input = &BatchGetItemInput{} - } - - output = &BatchGetItemOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// BatchGetItem API operation for Amazon DynamoDB. -// -// The BatchGetItem operation returns the attributes of one or more items from -// one or more tables. 
You identify requested items by primary key. -// -// A single operation can retrieve up to 16 MB of data, which can contain as -// many as 100 items. BatchGetItem returns a partial result if the response -// size limit is exceeded, the table's provisioned throughput is exceeded, more -// than 1MB per partition is requested, or an internal processing failure occurs. -// If a partial result is returned, the operation returns a value for UnprocessedKeys. -// You can use this value to retry the operation starting with the next item -// to get. -// -// If you request more than 100 items, BatchGetItem returns a ValidationException -// with the message "Too many items requested for the BatchGetItem call." -// -// For example, if you ask to retrieve 100 items, but each individual item is -// 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB -// limit). It also returns an appropriate UnprocessedKeys value so you can get -// the next page of results. If desired, your application can include its own -// logic to assemble the pages of results into one dataset. -// -// If none of the items can be processed due to insufficient provisioned throughput -// on all of the tables in the request, then BatchGetItem returns a ProvisionedThroughputExceededException. -// If at least one of the items is successfully processed, then BatchGetItem -// completes successfully, while returning the keys of the unread items in UnprocessedKeys. -// -// If DynamoDB returns any unprocessed items, you should retry the batch operation -// on those items. However, we strongly recommend that you use an exponential -// backoff algorithm. If you retry the batch operation immediately, the underlying -// read or write requests can still fail due to throttling on the individual -// tables. If you delay the batch operation using exponential backoff, the individual -// requests in the batch are much more likely to succeed. -// -// For more information, see Batch Operations and Error Handling (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#BatchOperations) -// in the Amazon DynamoDB Developer Guide. -// -// By default, BatchGetItem performs eventually consistent reads on every table -// in the request. If you want strongly consistent reads instead, you can set -// ConsistentRead to true for any or all tables. -// -// In order to minimize response latency, BatchGetItem may retrieve items in -// parallel. -// -// When designing your application, keep in mind that DynamoDB does not return -// items in any particular order. To help parse the response by item, include -// the primary key values for the items in your request in the ProjectionExpression -// parameter. -// -// If a requested item does not exist, it is not returned in the result. Requests -// for nonexistent items consume the minimum read capacity units according to -// the type of read. For more information, see Working with Tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#CapacityUnitCalculations) -// in the Amazon DynamoDB Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation BatchGetItem for usage and error information. -// -// Returned Error Types: -// -// - ProvisionedThroughputExceededException -// Your request rate is too high. 
The Amazon Web Services SDKs for DynamoDB -// automatically retry requests that receive this exception. Your request is -// eventually successful, unless your retry queue is too large to finish. Reduce -// the frequency of requests and use exponential backoff. For more information, -// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) -// in the Amazon DynamoDB Developer Guide. -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// - RequestLimitExceeded -// Throughput exceeds the current throughput quota for your account. Please -// contact Amazon Web Services Support (https://aws.amazon.com/support) to request -// a quota increase. -// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchGetItem -func (c *DynamoDB) BatchGetItem(input *BatchGetItemInput) (*BatchGetItemOutput, error) { - req, out := c.BatchGetItemRequest(input) - return out, req.Send() -} - -// BatchGetItemWithContext is the same as BatchGetItem with the addition of -// the ability to pass a context and additional request options. -// -// See BatchGetItem for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) BatchGetItemWithContext(ctx aws.Context, input *BatchGetItemInput, opts ...request.Option) (*BatchGetItemOutput, error) { - req, out := c.BatchGetItemRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// BatchGetItemPages iterates over the pages of a BatchGetItem operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See BatchGetItem method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a BatchGetItem operation. -// pageNum := 0 -// err := client.BatchGetItemPages(params, -// func(page *dynamodb.BatchGetItemOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *DynamoDB) BatchGetItemPages(input *BatchGetItemInput, fn func(*BatchGetItemOutput, bool) bool) error { - return c.BatchGetItemPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// BatchGetItemPagesWithContext same as BatchGetItemPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
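For reference, a minimal sketch of driving the paginator documented above with the v1 SDK. The table name "orders", the key attribute "pk", and the item keys are illustrative assumptions, not part of this change.

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// Request two items by primary key; "orders" and "pk" are hypothetical.
	input := &dynamodb.BatchGetItemInput{
		RequestItems: map[string]*dynamodb.KeysAndAttributes{
			"orders": {
				Keys: []map[string]*dynamodb.AttributeValue{
					{"pk": {S: aws.String("order-1")}},
					{"pk": {S: aws.String("order-2")}},
				},
			},
		},
	}

	// The paginator feeds each page's UnprocessedKeys back in as the next
	// request's RequestItems, per the Paginator configuration above.
	err := svc.BatchGetItemPagesWithContext(context.Background(), input,
		func(page *dynamodb.BatchGetItemOutput, lastPage bool) bool {
			for table, items := range page.Responses {
				fmt.Printf("%s: %d item(s)\n", table, len(items))
			}
			return true // returning false would stop paging early
		})
	if err != nil {
		fmt.Println("batch get failed:", err)
	}
}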
-func (c *DynamoDB) BatchGetItemPagesWithContext(ctx aws.Context, input *BatchGetItemInput, fn func(*BatchGetItemOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *BatchGetItemInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.BatchGetItemRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*BatchGetItemOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opBatchWriteItem = "BatchWriteItem" - -// BatchWriteItemRequest generates a "aws/request.Request" representing the -// client's request for the BatchWriteItem operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See BatchWriteItem for more information on using the BatchWriteItem -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the BatchWriteItemRequest method. -// req, resp := client.BatchWriteItemRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchWriteItem -func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *request.Request, output *BatchWriteItemOutput) { - op := &request.Operation{ - Name: opBatchWriteItem, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &BatchWriteItemInput{} - } - - output = &BatchWriteItemOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// BatchWriteItem API operation for Amazon DynamoDB. -// -// The BatchWriteItem operation puts or deletes multiple items in one or more -// tables. A single call to BatchWriteItem can transmit up to 16MB of data over -// the network, consisting of up to 25 item put or delete operations. While -// individual items can be up to 400 KB once stored, it's important to note -// that an item's representation might be greater than 400KB while being sent -// in DynamoDB's JSON format for the API call. For more details on this distinction, -// see Naming Rules and Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html). -// -// BatchWriteItem cannot update items. If you perform a BatchWriteItem operation -// on an existing item, that item's values will be overwritten by the operation -// and it will appear like it was updated. 
To update items, we recommend you -// use the UpdateItem action. -// -// The individual PutItem and DeleteItem operations specified in BatchWriteItem -// are atomic; however BatchWriteItem as a whole is not. If any requested operations -// fail because the table's provisioned throughput is exceeded or an internal -// processing failure occurs, the failed operations are returned in the UnprocessedItems -// response parameter. You can investigate and optionally resend the requests. -// Typically, you would call BatchWriteItem in a loop. Each iteration would -// check for unprocessed items and submit a new BatchWriteItem request with -// those unprocessed items until all items have been processed. -// -// For tables and indexes with provisioned capacity, if none of the items can -// be processed due to insufficient provisioned throughput on all of the tables -// in the request, then BatchWriteItem returns a ProvisionedThroughputExceededException. -// For all tables and indexes, if none of the items can be processed due to -// other throttling scenarios (such as exceeding partition level limits), then -// BatchWriteItem returns a ThrottlingException. -// -// If DynamoDB returns any unprocessed items, you should retry the batch operation -// on those items. However, we strongly recommend that you use an exponential -// backoff algorithm. If you retry the batch operation immediately, the underlying -// read or write requests can still fail due to throttling on the individual -// tables. If you delay the batch operation using exponential backoff, the individual -// requests in the batch are much more likely to succeed. -// -// For more information, see Batch Operations and Error Handling (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#Programming.Errors.BatchOperations) -// in the Amazon DynamoDB Developer Guide. -// -// With BatchWriteItem, you can efficiently write or delete large amounts of -// data, such as from Amazon EMR, or copy data from another database into DynamoDB. -// In order to improve performance with these large-scale operations, BatchWriteItem -// does not behave in the same way as individual PutItem and DeleteItem calls -// would. For example, you cannot specify conditions on individual put and delete -// requests, and BatchWriteItem does not return deleted items in the response. -// -// If you use a programming language that supports concurrency, you can use -// threads to write items in parallel. Your application must include the necessary -// logic to manage the threads. With languages that don't support threading, -// you must update or delete the specified items one at a time. In both situations, -// BatchWriteItem performs the specified put and delete operations in parallel, -// giving you the power of the thread pool approach without having to introduce -// complexity into your application. -// -// Parallel processing reduces latency, but each specified put and delete request -// consumes the same number of write capacity units whether it is processed -// in parallel or not. Delete operations on nonexistent items consume one write -// capacity unit. -// -// If one or more of the following is true, DynamoDB rejects the entire batch -// write operation: -// -// - One or more tables specified in the BatchWriteItem request does not -// exist. -// -// - Primary key attributes specified on an item in the request do not match -// those in the corresponding table's primary key schema. 
-// -// - You try to perform multiple operations on the same item in the same -// BatchWriteItem request. For example, you cannot put and delete the same -// item in the same BatchWriteItem request. -// -// - Your request contains at least two items with identical hash and range -// keys (which essentially is two put operations). -// -// - There are more than 25 requests in the batch. -// -// - Any individual item in a batch exceeds 400 KB. -// -// - The total request size exceeds 16 MB. -// -// - Any individual items with keys exceeding the key length limits. For -// a partition key, the limit is 2048 bytes and for a sort key, the limit -// is 1024 bytes. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation BatchWriteItem for usage and error information. -// -// Returned Error Types: -// -// - ProvisionedThroughputExceededException -// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB -// automatically retry requests that receive this exception. Your request is -// eventually successful, unless your retry queue is too large to finish. Reduce -// the frequency of requests and use exponential backoff. For more information, -// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) -// in the Amazon DynamoDB Developer Guide. -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// - ItemCollectionSizeLimitExceededException -// An item collection is too large. This exception is only returned for tables -// that have one or more local secondary indexes. -// -// - RequestLimitExceeded -// Throughput exceeds the current throughput quota for your account. Please -// contact Amazon Web Services Support (https://aws.amazon.com/support) to request -// a quota increase. -// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchWriteItem -func (c *DynamoDB) BatchWriteItem(input *BatchWriteItemInput) (*BatchWriteItemOutput, error) { - req, out := c.BatchWriteItemRequest(input) - return out, req.Send() -} - -// BatchWriteItemWithContext is the same as BatchWriteItem with the addition of -// the ability to pass a context and additional request options. -// -// See BatchWriteItem for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) BatchWriteItemWithContext(ctx aws.Context, input *BatchWriteItemInput, opts ...request.Option) (*BatchWriteItemOutput, error) { - req, out := c.BatchWriteItemRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateBackup = "CreateBackup" - -// CreateBackupRequest generates a "aws/request.Request" representing the -// client's request for the CreateBackup operation. 
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateBackup for more information on using the CreateBackup -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the CreateBackupRequest method. -// req, resp := client.CreateBackupRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateBackup -func (c *DynamoDB) CreateBackupRequest(input *CreateBackupInput) (req *request.Request, output *CreateBackupOutput) { - op := &request.Operation{ - Name: opCreateBackup, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateBackupInput{} - } - - output = &CreateBackupOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// CreateBackup API operation for Amazon DynamoDB. -// -// Creates a backup for an existing table. -// -// Each time you create an on-demand backup, the entire table data is backed -// up. There is no limit to the number of on-demand backups that can be taken. -// -// When you create an on-demand backup, a time marker of the request is cataloged, -// and the backup is created asynchronously, by applying all changes until the -// time of the request to the last full table snapshot. Backup requests are -// processed instantaneously and become available for restore within minutes. -// -// You can call CreateBackup at a maximum rate of 50 times per second. -// -// All backups in DynamoDB work without consuming any provisioned throughput -// on the table. -// -// If you submit a backup request on 2018-12-14 at 14:25:00, the backup is guaranteed -// to contain all data committed to the table up to 14:24:00, and data committed -// after 14:26:00 will not be. The backup might contain data modifications made -// between 14:24:00 and 14:26:00. On-demand backup does not support causal consistency. -// -// Along with data, the following are also included on the backups: -// -// - Global secondary indexes (GSIs) -// -// - Local secondary indexes (LSIs) -// -// - Streams -// -// - Provisioned read and write capacity -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation CreateBackup for usage and error information. 
-// -// Returned Error Types: -// -// - TableNotFoundException -// A source table with the name TableName does not currently exist within the -// subscriber's account or the subscriber is operating in the wrong Amazon Web -// Services Region. -// -// - TableInUseException -// A target table with the specified name is either being created or deleted. -// -// - ContinuousBackupsUnavailableException -// Backups have not yet been enabled for this table. -// -// - BackupInUseException -// There is another ongoing conflicting backup control plane operation on the -// table. The backup is either being created, deleted or restored to a table. -// -// - LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. -// -// For most purposes, up to 500 simultaneous table operations are allowed per -// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, -// RestoreTableFromBackup, and RestoreTableToPointInTime. -// -// When you are creating a table with one or more secondary indexes, you can -// have up to 250 such requests running at a time. However, if the table or -// index specifications are complex, then DynamoDB might temporarily reduce -// the number of concurrent operations. -// -// When importing into DynamoDB, up to 50 simultaneous import table operations -// are allowed per account. -// -// There is a soft account quota of 2,500 tables. -// -// GetRecords was called with a value of more than 1000 for the limit request -// parameter. -// -// More than 2 processes are reading from the same streams shard at the same -// time. Exceeding this limit may result in request throttling. -// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateBackup -func (c *DynamoDB) CreateBackup(input *CreateBackupInput) (*CreateBackupOutput, error) { - req, out := c.CreateBackupRequest(input) - return out, req.Send() -} - -// CreateBackupWithContext is the same as CreateBackup with the addition of -// the ability to pass a context and additional request options. -// -// See CreateBackup for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) CreateBackupWithContext(ctx aws.Context, input *CreateBackupInput, opts ...request.Option) (*CreateBackupOutput, error) { - req, out := c.CreateBackupRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateGlobalTable = "CreateGlobalTable" - -// CreateGlobalTableRequest generates a "aws/request.Request" representing the -// client's request for the CreateGlobalTable operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateGlobalTable for more information on using the CreateGlobalTable -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// // Example sending a request using the CreateGlobalTableRequest method. -// req, resp := client.CreateGlobalTableRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateGlobalTable -func (c *DynamoDB) CreateGlobalTableRequest(input *CreateGlobalTableInput) (req *request.Request, output *CreateGlobalTableOutput) { - op := &request.Operation{ - Name: opCreateGlobalTable, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateGlobalTableInput{} - } - - output = &CreateGlobalTableOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// CreateGlobalTable API operation for Amazon DynamoDB. -// -// Creates a global table from an existing table. A global table creates a replication -// relationship between two or more DynamoDB tables with the same table name -// in the provided Regions. -// -// This documentation is for version 2017.11.29 (Legacy) of global tables, which -// should be avoided for new global tables. Customers should use Global Tables -// version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html) -// when possible, because it provides greater flexibility, higher efficiency, -// and consumes less write capacity than 2017.11.29 (Legacy). -// -// To determine which version you're using, see Determining the global table -// version you are using (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html). -// To update existing global tables from version 2017.11.29 (Legacy) to version -// 2019.11.21 (Current), see Upgrading global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html). -// -// If you want to add a new replica table to a global table, each of the following -// conditions must be true: -// -// - The table must have the same primary key as all of the other replicas. -// -// - The table must have the same name as all of the other replicas. -// -// - The table must have DynamoDB Streams enabled, with the stream containing -// both the new and the old images of the item. -// -// - None of the replica tables in the global table can contain any data. -// -// If global secondary indexes are specified, then the following conditions -// must also be met: -// -// - The global secondary indexes must have the same name. -// -// - The global secondary indexes must have the same hash key and sort key -// (if present). -// -// If local secondary indexes are specified, then the following conditions must -// also be met: -// -// - The local secondary indexes must have the same name. -// -// - The local secondary indexes must have the same hash key and sort key -// (if present). 
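A minimal sketch of creating a legacy (2017.11.29) global table once the replica conditions listed above hold; the table name and Regions are illustrative assumptions.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// "orders" must already exist in both Regions, be empty, and have Streams
	// enabled with new and old images, per the conditions listed above.
	out, err := svc.CreateGlobalTableWithContext(context.Background(), &dynamodb.CreateGlobalTableInput{
		GlobalTableName: aws.String("orders"),
		ReplicationGroup: []*dynamodb.Replica{
			{RegionName: aws.String("us-east-1")},
			{RegionName: aws.String("eu-west-1")},
		},
	})
	if err != nil {
		log.Fatal(err) // e.g. GlobalTableAlreadyExistsException, TableNotFoundException
	}
	fmt.Println(aws.StringValue(out.GlobalTableDescription.GlobalTableStatus))
}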
-// -// Write capacity settings should be set consistently across your replica tables -// and secondary indexes. DynamoDB strongly recommends enabling auto scaling -// to manage the write capacity settings for all of your global tables replicas -// and indexes. -// -// If you prefer to manage write capacity settings manually, you should provision -// equal replicated write capacity units to your replica tables. You should -// also provision equal replicated write capacity units to matching secondary -// indexes across your global table. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation CreateGlobalTable for usage and error information. -// -// Returned Error Types: -// -// - LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. -// -// For most purposes, up to 500 simultaneous table operations are allowed per -// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, -// RestoreTableFromBackup, and RestoreTableToPointInTime. -// -// When you are creating a table with one or more secondary indexes, you can -// have up to 250 such requests running at a time. However, if the table or -// index specifications are complex, then DynamoDB might temporarily reduce -// the number of concurrent operations. -// -// When importing into DynamoDB, up to 50 simultaneous import table operations -// are allowed per account. -// -// There is a soft account quota of 2,500 tables. -// -// GetRecords was called with a value of more than 1000 for the limit request -// parameter. -// -// More than 2 processes are reading from the same streams shard at the same -// time. Exceeding this limit may result in request throttling. -// -// - InternalServerError -// An error occurred on the server side. -// -// - GlobalTableAlreadyExistsException -// The specified global table already exists. -// -// - TableNotFoundException -// A source table with the name TableName does not currently exist within the -// subscriber's account or the subscriber is operating in the wrong Amazon Web -// Services Region. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateGlobalTable -func (c *DynamoDB) CreateGlobalTable(input *CreateGlobalTableInput) (*CreateGlobalTableOutput, error) { - req, out := c.CreateGlobalTableRequest(input) - return out, req.Send() -} - -// CreateGlobalTableWithContext is the same as CreateGlobalTable with the addition of -// the ability to pass a context and additional request options. -// -// See CreateGlobalTable for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) CreateGlobalTableWithContext(ctx aws.Context, input *CreateGlobalTableInput, opts ...request.Option) (*CreateGlobalTableOutput, error) { - req, out := c.CreateGlobalTableRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateTable = "CreateTable" - -// CreateTableRequest generates a "aws/request.Request" representing the -// client's request for the CreateTable operation. 
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateTable for more information on using the CreateTable -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the CreateTableRequest method. -// req, resp := client.CreateTableRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateTable -func (c *DynamoDB) CreateTableRequest(input *CreateTableInput) (req *request.Request, output *CreateTableOutput) { - op := &request.Operation{ - Name: opCreateTable, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateTableInput{} - } - - output = &CreateTableOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// CreateTable API operation for Amazon DynamoDB. -// -// The CreateTable operation adds a new table to your account. In an Amazon -// Web Services account, table names must be unique within each Region. That -// is, you can have two tables with same name if you create the tables in different -// Regions. -// -// CreateTable is an asynchronous operation. Upon receiving a CreateTable request, -// DynamoDB immediately returns a response with a TableStatus of CREATING. After -// the table is created, DynamoDB sets the TableStatus to ACTIVE. You can perform -// read and write operations only on an ACTIVE table. -// -// You can optionally define secondary indexes on the new table, as part of -// the CreateTable operation. If you want to create multiple tables with secondary -// indexes on them, you must create the tables sequentially. Only one table -// with secondary indexes can be in the CREATING state at any given time. -// -// You can use the DescribeTable action to check the table status. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation CreateTable for usage and error information. -// -// Returned Error Types: -// -// - ResourceInUseException -// The operation conflicts with the resource's availability. For example, you -// attempted to recreate an existing table, or tried to delete a table currently -// in the CREATING state. -// -// - LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. 
-// -// For most purposes, up to 500 simultaneous table operations are allowed per -// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, -// RestoreTableFromBackup, and RestoreTableToPointInTime. -// -// When you are creating a table with one or more secondary indexes, you can -// have up to 250 such requests running at a time. However, if the table or -// index specifications are complex, then DynamoDB might temporarily reduce -// the number of concurrent operations. -// -// When importing into DynamoDB, up to 50 simultaneous import table operations -// are allowed per account. -// -// There is a soft account quota of 2,500 tables. -// -// GetRecords was called with a value of more than 1000 for the limit request -// parameter. -// -// More than 2 processes are reading from the same streams shard at the same -// time. Exceeding this limit may result in request throttling. -// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateTable -func (c *DynamoDB) CreateTable(input *CreateTableInput) (*CreateTableOutput, error) { - req, out := c.CreateTableRequest(input) - return out, req.Send() -} - -// CreateTableWithContext is the same as CreateTable with the addition of -// the ability to pass a context and additional request options. -// -// See CreateTable for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) CreateTableWithContext(ctx aws.Context, input *CreateTableInput, opts ...request.Option) (*CreateTableOutput, error) { - req, out := c.CreateTableRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteBackup = "DeleteBackup" - -// DeleteBackupRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBackup operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteBackup for more information on using the DeleteBackup -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DeleteBackupRequest method. -// req, resp := client.DeleteBackupRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteBackup -func (c *DynamoDB) DeleteBackupRequest(input *DeleteBackupInput) (req *request.Request, output *DeleteBackupOutput) { - op := &request.Operation{ - Name: opDeleteBackup, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteBackupInput{} - } - - output = &DeleteBackupOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. 
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// DeleteBackup API operation for Amazon DynamoDB. -// -// Deletes an existing backup of a table. -// -// You can call DeleteBackup at a maximum rate of 10 times per second. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation DeleteBackup for usage and error information. -// -// Returned Error Types: -// -// - BackupNotFoundException -// Backup not found for the given BackupARN. -// -// - BackupInUseException -// There is another ongoing conflicting backup control plane operation on the -// table. The backup is either being created, deleted or restored to a table. -// -// - LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. -// -// For most purposes, up to 500 simultaneous table operations are allowed per -// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, -// RestoreTableFromBackup, and RestoreTableToPointInTime. -// -// When you are creating a table with one or more secondary indexes, you can -// have up to 250 such requests running at a time. However, if the table or -// index specifications are complex, then DynamoDB might temporarily reduce -// the number of concurrent operations. -// -// When importing into DynamoDB, up to 50 simultaneous import table operations -// are allowed per account. -// -// There is a soft account quota of 2,500 tables. -// -// GetRecords was called with a value of more than 1000 for the limit request -// parameter. -// -// More than 2 processes are reading from the same streams shard at the same -// time. Exceeding this limit may result in request throttling. -// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteBackup -func (c *DynamoDB) DeleteBackup(input *DeleteBackupInput) (*DeleteBackupOutput, error) { - req, out := c.DeleteBackupRequest(input) - return out, req.Send() -} - -// DeleteBackupWithContext is the same as DeleteBackup with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteBackup for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) DeleteBackupWithContext(ctx aws.Context, input *DeleteBackupInput, opts ...request.Option) (*DeleteBackupOutput, error) { - req, out := c.DeleteBackupRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opDeleteItem = "DeleteItem" - -// DeleteItemRequest generates a "aws/request.Request" representing the -// client's request for the DeleteItem operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteItem for more information on using the DeleteItem -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DeleteItemRequest method. -// req, resp := client.DeleteItemRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteItem -func (c *DynamoDB) DeleteItemRequest(input *DeleteItemInput) (req *request.Request, output *DeleteItemOutput) { - op := &request.Operation{ - Name: opDeleteItem, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteItemInput{} - } - - output = &DeleteItemOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// DeleteItem API operation for Amazon DynamoDB. -// -// Deletes a single item in a table by primary key. You can perform a conditional -// delete operation that deletes the item if it exists, or if it has an expected -// attribute value. -// -// In addition to deleting an item, you can also return the item's attribute -// values in the same operation, using the ReturnValues parameter. -// -// Unless you specify conditions, the DeleteItem is an idempotent operation; -// running it multiple times on the same item or attribute does not result in -// an error response. -// -// Conditional deletes are useful for deleting items only if specific conditions -// are met. If those conditions are met, DynamoDB performs the delete. Otherwise, -// the item is not deleted. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation DeleteItem for usage and error information. -// -// Returned Error Types: -// -// - ConditionalCheckFailedException -// A condition specified in the operation could not be evaluated. -// -// - ProvisionedThroughputExceededException -// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB -// automatically retry requests that receive this exception. Your request is -// eventually successful, unless your retry queue is too large to finish. 
Reduce -// the frequency of requests and use exponential backoff. For more information, -// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) -// in the Amazon DynamoDB Developer Guide. -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// - ItemCollectionSizeLimitExceededException -// An item collection is too large. This exception is only returned for tables -// that have one or more local secondary indexes. -// -// - TransactionConflictException -// Operation was rejected because there is an ongoing transaction for the item. -// -// - RequestLimitExceeded -// Throughput exceeds the current throughput quota for your account. Please -// contact Amazon Web Services Support (https://aws.amazon.com/support) to request -// a quota increase. -// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteItem -func (c *DynamoDB) DeleteItem(input *DeleteItemInput) (*DeleteItemOutput, error) { - req, out := c.DeleteItemRequest(input) - return out, req.Send() -} - -// DeleteItemWithContext is the same as DeleteItem with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteItem for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) DeleteItemWithContext(ctx aws.Context, input *DeleteItemInput, opts ...request.Option) (*DeleteItemOutput, error) { - req, out := c.DeleteItemRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteResourcePolicy = "DeleteResourcePolicy" - -// DeleteResourcePolicyRequest generates a "aws/request.Request" representing the -// client's request for the DeleteResourcePolicy operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteResourcePolicy for more information on using the DeleteResourcePolicy -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DeleteResourcePolicyRequest method. 
-// req, resp := client.DeleteResourcePolicyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteResourcePolicy -func (c *DynamoDB) DeleteResourcePolicyRequest(input *DeleteResourcePolicyInput) (req *request.Request, output *DeleteResourcePolicyOutput) { - op := &request.Operation{ - Name: opDeleteResourcePolicy, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteResourcePolicyInput{} - } - - output = &DeleteResourcePolicyOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// DeleteResourcePolicy API operation for Amazon DynamoDB. -// -// Deletes the resource-based policy attached to the resource, which can be -// a table or stream. -// -// DeleteResourcePolicy is an idempotent operation; running it multiple times -// on the same resource doesn't result in an error response, unless you specify -// an ExpectedRevisionId, which will then return a PolicyNotFoundException. -// -// To make sure that you don't inadvertently lock yourself out of your own resources, -// the root principal in your Amazon Web Services account can perform DeleteResourcePolicy -// requests, even if your resource-based policy explicitly denies the root principal's -// access. -// -// DeleteResourcePolicy is an asynchronous operation. If you issue a GetResourcePolicy -// request immediately after running the DeleteResourcePolicy request, DynamoDB -// might still return the deleted policy. This is because the policy for your -// resource might not have been deleted yet. Wait for a few seconds, and then -// try the GetResourcePolicy request again. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation DeleteResourcePolicy for usage and error information. -// -// Returned Error Types: -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// - InternalServerError -// An error occurred on the server side. -// -// - PolicyNotFoundException -// The operation tried to access a nonexistent resource-based policy. -// -// If you specified an ExpectedRevisionId, it's possible that a policy is present -// for the resource but its revision ID didn't match the expected value. -// -// - ResourceInUseException -// The operation conflicts with the resource's availability. For example, you -// attempted to recreate an existing table, or tried to delete a table currently -// in the CREATING state. 
-//
-//   - LimitExceededException
-//     There is no limit to the number of daily on-demand backups that can be taken.
-//
-//     For most purposes, up to 500 simultaneous table operations are allowed per
-//     account. These operations include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive,
-//     RestoreTableFromBackup, and RestoreTableToPointInTime.
-//
-//     When you are creating a table with one or more secondary indexes, you can
-//     have up to 250 such requests running at a time. However, if the table or
-//     index specifications are complex, then DynamoDB might temporarily reduce
-//     the number of concurrent operations.
-//
-//     When importing into DynamoDB, up to 50 simultaneous import table operations
-//     are allowed per account.
-//
-//     There is a soft account quota of 2,500 tables.
-//
-//     GetRecords was called with a value of more than 1000 for the limit request
-//     parameter.
-//
-//     More than 2 processes are reading from the same streams shard at the same
-//     time. Exceeding this limit may result in request throttling.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteResourcePolicy
-func (c *DynamoDB) DeleteResourcePolicy(input *DeleteResourcePolicyInput) (*DeleteResourcePolicyOutput, error) {
-	req, out := c.DeleteResourcePolicyRequest(input)
-	return out, req.Send()
-}
-
-// DeleteResourcePolicyWithContext is the same as DeleteResourcePolicy with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DeleteResourcePolicy for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) DeleteResourcePolicyWithContext(ctx aws.Context, input *DeleteResourcePolicyInput, opts ...request.Option) (*DeleteResourcePolicyOutput, error) {
-	req, out := c.DeleteResourcePolicyRequest(input)
-	req.SetContext(ctx)
-	req.ApplyOptions(opts...)
-	return out, req.Send()
-}
-
-const opDeleteTable = "DeleteTable"
-
-// DeleteTableRequest generates a "aws/request.Request" representing the
-// client's request for the DeleteTable operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DeleteTable for more information on using the DeleteTable
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-//	// Example sending a request using the DeleteTableRequest method.
-//	req, resp := client.DeleteTableRequest(params)
-//
-//	err := req.Send()
-//	if err == nil { // resp is now filled
-//	    fmt.Println(resp)
-//	}
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteTable
-func (c *DynamoDB) DeleteTableRequest(input *DeleteTableInput) (req *request.Request, output *DeleteTableOutput) {
-	op := &request.Operation{
-		Name:       opDeleteTable,
-		HTTPMethod: "POST",
-		HTTPPath:   "/",
-	}
-
-	if input == nil {
-		input = &DeleteTableInput{}
-	}
-
-	output = &DeleteTableOutput{}
-	req = c.newRequest(op, input, output)
-	// if custom endpoint for the request is set to a non empty string,
-	// we skip the endpoint discovery workflow.
-	if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
-		if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
-			de := discovererDescribeEndpoints{
-				Required:      false,
-				EndpointCache: c.endpointCache,
-				Params: map[string]*string{
-					"op": aws.String(req.Operation.Name),
-				},
-				Client: c,
-			}
-
-			for k, v := range de.Params {
-				if v == nil {
-					delete(de.Params, k)
-				}
-			}
-
-			req.Handlers.Build.PushFrontNamed(request.NamedHandler{
-				Name: "crr.endpointdiscovery",
-				Fn:   de.Handler,
-			})
-		}
-	}
-	return
-}
-
-// DeleteTable API operation for Amazon DynamoDB.
-//
-// The DeleteTable operation deletes a table and all of its items. After a DeleteTable
-// request, the specified table is in the DELETING state until DynamoDB completes
-// the deletion. If the table is in the ACTIVE state, you can delete it. If
-// a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException.
-// If the specified table does not exist, DynamoDB returns a ResourceNotFoundException.
-// If the table is already in the DELETING state, no error is returned.
-//
-// For global tables, this operation only applies to global tables using Version
-// 2019.11.21 (Current version).
-//
-// DynamoDB might continue to accept data read and write operations, such as
-// GetItem and PutItem, on a table in the DELETING state until the table deletion
-// is complete. For the full list of table states, see TableStatus (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TableDescription.html#DDB-Type-TableDescription-TableStatus).
-//
-// When you delete a table, any indexes on that table are also deleted.
-//
-// If you have DynamoDB Streams enabled on the table, then the corresponding
-// stream on that table goes into the DISABLED state, and the stream is automatically
-// deleted after 24 hours.
-//
-// Use the DescribeTable action to check the status of the table.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation DeleteTable for usage and error information.
-//
-// Returned Error Types:
-//
-//   - ResourceInUseException
-//     The operation conflicts with the resource's availability. For example, you
-//     attempted to recreate an existing table, or tried to delete a table currently
-//     in the CREATING state.
-//
-//   - ResourceNotFoundException
-//     The operation tried to access a nonexistent table or index. The resource
-//     might not be specified correctly, or its status might not be ACTIVE.
-//
-//   - LimitExceededException
-//     There is no limit to the number of daily on-demand backups that can be taken.
-//
-//     For most purposes, up to 500 simultaneous table operations are allowed per
-//     account. These operations include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive,
-//     RestoreTableFromBackup, and RestoreTableToPointInTime.
-//
-//     When you are creating a table with one or more secondary indexes, you can
-//     have up to 250 such requests running at a time. However, if the table or
-//     index specifications are complex, then DynamoDB might temporarily reduce
-//     the number of concurrent operations.
-//
-//     When importing into DynamoDB, up to 50 simultaneous import table operations
-//     are allowed per account.
-//
-//     There is a soft account quota of 2,500 tables.
-//
-//     GetRecords was called with a value of more than 1000 for the limit request
-//     parameter.
-//
-//     More than 2 processes are reading from the same streams shard at the same
-//     time. Exceeding this limit may result in request throttling.
-//
-//   - InternalServerError
-//     An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteTable
-func (c *DynamoDB) DeleteTable(input *DeleteTableInput) (*DeleteTableOutput, error) {
-	req, out := c.DeleteTableRequest(input)
-	return out, req.Send()
-}
-
-// DeleteTableWithContext is the same as DeleteTable with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DeleteTable for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) DeleteTableWithContext(ctx aws.Context, input *DeleteTableInput, opts ...request.Option) (*DeleteTableOutput, error) {
-	req, out := c.DeleteTableRequest(input)
-	req.SetContext(ctx)
-	req.ApplyOptions(opts...)
-	return out, req.Send()
-}
-
-const opDescribeBackup = "DescribeBackup"
-
-// DescribeBackupRequest generates a "aws/request.Request" representing the
-// client's request for the DescribeBackup operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DescribeBackup for more information on using the DescribeBackup
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-//	// Example sending a request using the DescribeBackupRequest method.
-//	req, resp := client.DescribeBackupRequest(params)
-//
-//	err := req.Send()
-//	if err == nil { // resp is now filled
-//	    fmt.Println(resp)
-//	}
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeBackup
-func (c *DynamoDB) DescribeBackupRequest(input *DescribeBackupInput) (req *request.Request, output *DescribeBackupOutput) {
-	op := &request.Operation{
-		Name:       opDescribeBackup,
-		HTTPMethod: "POST",
-		HTTPPath:   "/",
-	}
-
-	if input == nil {
-		input = &DescribeBackupInput{}
-	}
-
-	output = &DescribeBackupOutput{}
-	req = c.newRequest(op, input, output)
-	// if custom endpoint for the request is set to a non empty string,
-	// we skip the endpoint discovery workflow.
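// [Editor's note] The endpoint-discovery block below is repeated verbatim in
// every operation that supports it: discovery is skipped when a custom
// endpoint is configured, and it only runs when the client opts in. A minimal
// sketch of opting in, assuming the usual aws-sdk-go v1 session helpers:
//
//	sess := session.Must(session.NewSession())
//	svc := dynamodb.New(sess, aws.NewConfig().WithEndpointDiscovery(true))
//
// With discovery enabled, the handler pushed onto the Build chain below
// consults the client's endpoint cache (and, on a miss, DescribeEndpoints)
// before the request is signed and sent.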
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// DescribeBackup API operation for Amazon DynamoDB. -// -// Describes an existing backup of a table. -// -// You can call DescribeBackup at a maximum rate of 10 times per second. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation DescribeBackup for usage and error information. -// -// Returned Error Types: -// -// - BackupNotFoundException -// Backup not found for the given BackupARN. -// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeBackup -func (c *DynamoDB) DescribeBackup(input *DescribeBackupInput) (*DescribeBackupOutput, error) { - req, out := c.DescribeBackupRequest(input) - return out, req.Send() -} - -// DescribeBackupWithContext is the same as DescribeBackup with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeBackup for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) DescribeBackupWithContext(ctx aws.Context, input *DescribeBackupInput, opts ...request.Option) (*DescribeBackupOutput, error) { - req, out := c.DescribeBackupRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeContinuousBackups = "DescribeContinuousBackups" - -// DescribeContinuousBackupsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeContinuousBackups operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeContinuousBackups for more information on using the DescribeContinuousBackups -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DescribeContinuousBackupsRequest method. 
-// req, resp := client.DescribeContinuousBackupsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeContinuousBackups -func (c *DynamoDB) DescribeContinuousBackupsRequest(input *DescribeContinuousBackupsInput) (req *request.Request, output *DescribeContinuousBackupsOutput) { - op := &request.Operation{ - Name: opDescribeContinuousBackups, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeContinuousBackupsInput{} - } - - output = &DescribeContinuousBackupsOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// DescribeContinuousBackups API operation for Amazon DynamoDB. -// -// Checks the status of continuous backups and point in time recovery on the -// specified table. Continuous backups are ENABLED on all tables at table creation. -// If point in time recovery is enabled, PointInTimeRecoveryStatus will be set -// to ENABLED. -// -// After continuous backups and point in time recovery are enabled, you can -// restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime. -// -// LatestRestorableDateTime is typically 5 minutes before the current time. -// You can restore your table to any point in time during the last 35 days. -// -// You can call DescribeContinuousBackups at a maximum rate of 10 times per -// second. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation DescribeContinuousBackups for usage and error information. -// -// Returned Error Types: -// -// - TableNotFoundException -// A source table with the name TableName does not currently exist within the -// subscriber's account or the subscriber is operating in the wrong Amazon Web -// Services Region. -// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeContinuousBackups -func (c *DynamoDB) DescribeContinuousBackups(input *DescribeContinuousBackupsInput) (*DescribeContinuousBackupsOutput, error) { - req, out := c.DescribeContinuousBackupsRequest(input) - return out, req.Send() -} - -// DescribeContinuousBackupsWithContext is the same as DescribeContinuousBackups with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeContinuousBackups for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) DescribeContinuousBackupsWithContext(ctx aws.Context, input *DescribeContinuousBackupsInput, opts ...request.Option) (*DescribeContinuousBackupsOutput, error) { - req, out := c.DescribeContinuousBackupsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeContributorInsights = "DescribeContributorInsights" - -// DescribeContributorInsightsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeContributorInsights operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeContributorInsights for more information on using the DescribeContributorInsights -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DescribeContributorInsightsRequest method. -// req, resp := client.DescribeContributorInsightsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeContributorInsights -func (c *DynamoDB) DescribeContributorInsightsRequest(input *DescribeContributorInsightsInput) (req *request.Request, output *DescribeContributorInsightsOutput) { - op := &request.Operation{ - Name: opDescribeContributorInsights, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeContributorInsightsInput{} - } - - output = &DescribeContributorInsightsOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeContributorInsights API operation for Amazon DynamoDB. -// -// Returns information about contributor insights for a given table or global -// secondary index. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation DescribeContributorInsights for usage and error information. -// -// Returned Error Types: -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeContributorInsights -func (c *DynamoDB) DescribeContributorInsights(input *DescribeContributorInsightsInput) (*DescribeContributorInsightsOutput, error) { - req, out := c.DescribeContributorInsightsRequest(input) - return out, req.Send() -} - -// DescribeContributorInsightsWithContext is the same as DescribeContributorInsights with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeContributorInsights for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. 
In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) DescribeContributorInsightsWithContext(ctx aws.Context, input *DescribeContributorInsightsInput, opts ...request.Option) (*DescribeContributorInsightsOutput, error) { - req, out := c.DescribeContributorInsightsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeEndpoints = "DescribeEndpoints" - -// DescribeEndpointsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeEndpoints operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeEndpoints for more information on using the DescribeEndpoints -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DescribeEndpointsRequest method. -// req, resp := client.DescribeEndpointsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeEndpoints -func (c *DynamoDB) DescribeEndpointsRequest(input *DescribeEndpointsInput) (req *request.Request, output *DescribeEndpointsOutput) { - op := &request.Operation{ - Name: opDescribeEndpoints, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeEndpointsInput{} - } - - output = &DescribeEndpointsOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeEndpoints API operation for Amazon DynamoDB. -// -// Returns the regional endpoint information. For more information on policy -// permissions, please see Internetwork traffic privacy (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/inter-network-traffic-privacy.html#inter-network-traffic-DescribeEndpoints). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation DescribeEndpoints for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeEndpoints -func (c *DynamoDB) DescribeEndpoints(input *DescribeEndpointsInput) (*DescribeEndpointsOutput, error) { - req, out := c.DescribeEndpointsRequest(input) - return out, req.Send() -} - -// DescribeEndpointsWithContext is the same as DescribeEndpoints with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeEndpoints for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
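// [Editor's note] Application code rarely needs to call DescribeEndpoints
// itself; the discovererDescribeEndpoints type defined just below invokes it
// on behalf of the endpoint-discovery handler. A hedged sketch of a direct
// call, reusing the svc client assumed in the earlier note:
//
//	out, err := svc.DescribeEndpoints(&dynamodb.DescribeEndpointsInput{})
//	if err == nil {
//		for _, e := range out.Endpoints {
//			fmt.Printf("%s (cache %d min)\n",
//				aws.StringValue(e.Address),
//				aws.Int64Value(e.CachePeriodInMinutes))
//		}
//	}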
-func (c *DynamoDB) DescribeEndpointsWithContext(ctx aws.Context, input *DescribeEndpointsInput, opts ...request.Option) (*DescribeEndpointsOutput, error) { - req, out := c.DescribeEndpointsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -type discovererDescribeEndpoints struct { - Client *DynamoDB - Required bool - EndpointCache *crr.EndpointCache - Params map[string]*string - Key string - req *request.Request -} - -func (d *discovererDescribeEndpoints) Discover() (crr.Endpoint, error) { - input := &DescribeEndpointsInput{} - - resp, err := d.Client.DescribeEndpoints(input) - if err != nil { - return crr.Endpoint{}, err - } - - endpoint := crr.Endpoint{ - Key: d.Key, - } - - for _, e := range resp.Endpoints { - if e.Address == nil { - continue - } - - address := *e.Address - - var scheme string - if idx := strings.Index(address, "://"); idx != -1 { - scheme = address[:idx] - } - - if len(scheme) == 0 { - address = fmt.Sprintf("%s://%s", d.req.HTTPRequest.URL.Scheme, address) - } - - cachedInMinutes := aws.Int64Value(e.CachePeriodInMinutes) - u, err := url.Parse(address) - if err != nil { - continue - } - - addr := crr.WeightedAddress{ - URL: u, - Expired: time.Now().Add(time.Duration(cachedInMinutes) * time.Minute), - } - - endpoint.Add(addr) - } - - d.EndpointCache.Add(endpoint) - - return endpoint, nil -} - -func (d *discovererDescribeEndpoints) Handler(r *request.Request) { - endpointKey := crr.BuildEndpointKey(d.Params) - d.Key = endpointKey - d.req = r - - endpoint, err := d.EndpointCache.Get(d, endpointKey, d.Required) - if err != nil { - r.Error = err - return - } - - if endpoint.URL != nil && len(endpoint.URL.String()) > 0 { - r.HTTPRequest.URL = endpoint.URL - } -} - -const opDescribeExport = "DescribeExport" - -// DescribeExportRequest generates a "aws/request.Request" representing the -// client's request for the DescribeExport operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeExport for more information on using the DescribeExport -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DescribeExportRequest method. -// req, resp := client.DescribeExportRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeExport -func (c *DynamoDB) DescribeExportRequest(input *DescribeExportInput) (req *request.Request, output *DescribeExportOutput) { - op := &request.Operation{ - Name: opDescribeExport, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeExportInput{} - } - - output = &DescribeExportOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeExport API operation for Amazon DynamoDB. -// -// Describes an existing table export. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
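// [Editor's note on the discoverer implemented above] Handler derives a cache
// key from the request parameters and asks the EndpointCache for a live
// endpoint, falling back to Discover on a miss; Discover normalizes each
// returned address, borrowing the scheme of the in-flight request when the
// service omits one, and caches it with a weighted expiry. For example:
//
//	// "dynamodb.foo.amazonaws.com" returned on an HTTPS request becomes
//	// "https://dynamodb.foo.amazonaws.com", cached for CachePeriodInMinutes.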
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation DescribeExport for usage and error information.
-//
-// Returned Error Types:
-//
-//   - ExportNotFoundException
-//     The specified export was not found.
-//
-//   - LimitExceededException
-//     There is no limit to the number of daily on-demand backups that can be taken.
-//
-//     For most purposes, up to 500 simultaneous table operations are allowed per
-//     account. These operations include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive,
-//     RestoreTableFromBackup, and RestoreTableToPointInTime.
-//
-//     When you are creating a table with one or more secondary indexes, you can
-//     have up to 250 such requests running at a time. However, if the table or
-//     index specifications are complex, then DynamoDB might temporarily reduce
-//     the number of concurrent operations.
-//
-//     When importing into DynamoDB, up to 50 simultaneous import table operations
-//     are allowed per account.
-//
-//     There is a soft account quota of 2,500 tables.
-//
-//     GetRecords was called with a value of more than 1000 for the limit request
-//     parameter.
-//
-//     More than 2 processes are reading from the same streams shard at the same
-//     time. Exceeding this limit may result in request throttling.
-//
-//   - InternalServerError
-//     An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeExport
-func (c *DynamoDB) DescribeExport(input *DescribeExportInput) (*DescribeExportOutput, error) {
-	req, out := c.DescribeExportRequest(input)
-	return out, req.Send()
-}
-
-// DescribeExportWithContext is the same as DescribeExport with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DescribeExport for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) DescribeExportWithContext(ctx aws.Context, input *DescribeExportInput, opts ...request.Option) (*DescribeExportOutput, error) {
-	req, out := c.DescribeExportRequest(input)
-	req.SetContext(ctx)
-	req.ApplyOptions(opts...)
-	return out, req.Send()
-}
-
-const opDescribeGlobalTable = "DescribeGlobalTable"
-
-// DescribeGlobalTableRequest generates a "aws/request.Request" representing the
-// client's request for the DescribeGlobalTable operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DescribeGlobalTable for more information on using the DescribeGlobalTable
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-//	// Example sending a request using the DescribeGlobalTableRequest method.
-// req, resp := client.DescribeGlobalTableRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTable -func (c *DynamoDB) DescribeGlobalTableRequest(input *DescribeGlobalTableInput) (req *request.Request, output *DescribeGlobalTableOutput) { - op := &request.Operation{ - Name: opDescribeGlobalTable, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeGlobalTableInput{} - } - - output = &DescribeGlobalTableOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// DescribeGlobalTable API operation for Amazon DynamoDB. -// -// Returns information about the specified global table. -// -// This documentation is for version 2017.11.29 (Legacy) of global tables, which -// should be avoided for new global tables. Customers should use Global Tables -// version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html) -// when possible, because it provides greater flexibility, higher efficiency, -// and consumes less write capacity than 2017.11.29 (Legacy). -// -// To determine which version you're using, see Determining the global table -// version you are using (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html). -// To update existing global tables from version 2017.11.29 (Legacy) to version -// 2019.11.21 (Current), see Upgrading global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation DescribeGlobalTable for usage and error information. -// -// Returned Error Types: -// -// - InternalServerError -// An error occurred on the server side. -// -// - GlobalTableNotFoundException -// The specified global table does not exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTable -func (c *DynamoDB) DescribeGlobalTable(input *DescribeGlobalTableInput) (*DescribeGlobalTableOutput, error) { - req, out := c.DescribeGlobalTableRequest(input) - return out, req.Send() -} - -// DescribeGlobalTableWithContext is the same as DescribeGlobalTable with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeGlobalTable for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) DescribeGlobalTableWithContext(ctx aws.Context, input *DescribeGlobalTableInput, opts ...request.Option) (*DescribeGlobalTableOutput, error) { - req, out := c.DescribeGlobalTableRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeGlobalTableSettings = "DescribeGlobalTableSettings" - -// DescribeGlobalTableSettingsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeGlobalTableSettings operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeGlobalTableSettings for more information on using the DescribeGlobalTableSettings -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DescribeGlobalTableSettingsRequest method. -// req, resp := client.DescribeGlobalTableSettingsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTableSettings -func (c *DynamoDB) DescribeGlobalTableSettingsRequest(input *DescribeGlobalTableSettingsInput) (req *request.Request, output *DescribeGlobalTableSettingsOutput) { - op := &request.Operation{ - Name: opDescribeGlobalTableSettings, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeGlobalTableSettingsInput{} - } - - output = &DescribeGlobalTableSettingsOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// DescribeGlobalTableSettings API operation for Amazon DynamoDB. -// -// Describes Region-specific settings for a global table. -// -// This documentation is for version 2017.11.29 (Legacy) of global tables, which -// should be avoided for new global tables. Customers should use Global Tables -// version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html) -// when possible, because it provides greater flexibility, higher efficiency, -// and consumes less write capacity than 2017.11.29 (Legacy). -// -// To determine which version you're using, see Determining the global table -// version you are using (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html). 
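// [Editor's note] A hedged sketch of that version check with this client:
// legacy (2017.11.29) tables are visible through DescribeGlobalTable, while
// 2019.11.21 tables report their replicas on DescribeTable instead. The table
// name here is hypothetical:
//
//	_, err := svc.DescribeGlobalTable(&dynamodb.DescribeGlobalTableInput{
//		GlobalTableName: aws.String("example-table"),
//	})
//	if aerr, ok := err.(awserr.Error); ok &&
//		aerr.Code() == dynamodb.ErrCodeGlobalTableNotFoundException {
//		// not a legacy global table; check DescribeTable's Replicas instead
//	}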
-// To update existing global tables from version 2017.11.29 (Legacy) to version -// 2019.11.21 (Current), see Upgrading global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation DescribeGlobalTableSettings for usage and error information. -// -// Returned Error Types: -// -// - GlobalTableNotFoundException -// The specified global table does not exist. -// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTableSettings -func (c *DynamoDB) DescribeGlobalTableSettings(input *DescribeGlobalTableSettingsInput) (*DescribeGlobalTableSettingsOutput, error) { - req, out := c.DescribeGlobalTableSettingsRequest(input) - return out, req.Send() -} - -// DescribeGlobalTableSettingsWithContext is the same as DescribeGlobalTableSettings with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeGlobalTableSettings for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) DescribeGlobalTableSettingsWithContext(ctx aws.Context, input *DescribeGlobalTableSettingsInput, opts ...request.Option) (*DescribeGlobalTableSettingsOutput, error) { - req, out := c.DescribeGlobalTableSettingsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeImport = "DescribeImport" - -// DescribeImportRequest generates a "aws/request.Request" representing the -// client's request for the DescribeImport operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeImport for more information on using the DescribeImport -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DescribeImportRequest method. -// req, resp := client.DescribeImportRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeImport -func (c *DynamoDB) DescribeImportRequest(input *DescribeImportInput) (req *request.Request, output *DescribeImportOutput) { - op := &request.Operation{ - Name: opDescribeImport, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeImportInput{} - } - - output = &DescribeImportOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeImport API operation for Amazon DynamoDB. -// -// Represents the properties of the import. -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation DescribeImport for usage and error information. -// -// Returned Error Types: -// - ImportNotFoundException -// The specified import was not found. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeImport -func (c *DynamoDB) DescribeImport(input *DescribeImportInput) (*DescribeImportOutput, error) { - req, out := c.DescribeImportRequest(input) - return out, req.Send() -} - -// DescribeImportWithContext is the same as DescribeImport with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeImport for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) DescribeImportWithContext(ctx aws.Context, input *DescribeImportInput, opts ...request.Option) (*DescribeImportOutput, error) { - req, out := c.DescribeImportRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeKinesisStreamingDestination = "DescribeKinesisStreamingDestination" - -// DescribeKinesisStreamingDestinationRequest generates a "aws/request.Request" representing the -// client's request for the DescribeKinesisStreamingDestination operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeKinesisStreamingDestination for more information on using the DescribeKinesisStreamingDestination -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DescribeKinesisStreamingDestinationRequest method. -// req, resp := client.DescribeKinesisStreamingDestinationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeKinesisStreamingDestination -func (c *DynamoDB) DescribeKinesisStreamingDestinationRequest(input *DescribeKinesisStreamingDestinationInput) (req *request.Request, output *DescribeKinesisStreamingDestinationOutput) { - op := &request.Operation{ - Name: opDescribeKinesisStreamingDestination, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeKinesisStreamingDestinationInput{} - } - - output = &DescribeKinesisStreamingDestinationOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. 
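// [Editor's note] The "runtime type assertions" these operation docs keep
// referring to follow one pattern package-wide. A minimal sketch against
// DescribeImport (documented above), assuming the aws/awserr import; the
// import ARN is a hypothetical placeholder:
//
//	_, err := svc.DescribeImport(&dynamodb.DescribeImportInput{
//		ImportArn: aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/example/import/example"), // hypothetical
//	})
//	if aerr, ok := err.(awserr.Error); ok {
//		switch aerr.Code() {
//		case dynamodb.ErrCodeImportNotFoundException:
//			// handle the missing import
//		default:
//			fmt.Println(aerr.Code(), aerr.Message())
//		}
//	}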
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// DescribeKinesisStreamingDestination API operation for Amazon DynamoDB. -// -// Returns information about the status of Kinesis streaming. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation DescribeKinesisStreamingDestination for usage and error information. -// -// Returned Error Types: -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeKinesisStreamingDestination -func (c *DynamoDB) DescribeKinesisStreamingDestination(input *DescribeKinesisStreamingDestinationInput) (*DescribeKinesisStreamingDestinationOutput, error) { - req, out := c.DescribeKinesisStreamingDestinationRequest(input) - return out, req.Send() -} - -// DescribeKinesisStreamingDestinationWithContext is the same as DescribeKinesisStreamingDestination with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeKinesisStreamingDestination for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) DescribeKinesisStreamingDestinationWithContext(ctx aws.Context, input *DescribeKinesisStreamingDestinationInput, opts ...request.Option) (*DescribeKinesisStreamingDestinationOutput, error) { - req, out := c.DescribeKinesisStreamingDestinationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeLimits = "DescribeLimits" - -// DescribeLimitsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeLimits operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeLimits for more information on using the DescribeLimits -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DescribeLimitsRequest method. 
-// req, resp := client.DescribeLimitsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeLimits -func (c *DynamoDB) DescribeLimitsRequest(input *DescribeLimitsInput) (req *request.Request, output *DescribeLimitsOutput) { - op := &request.Operation{ - Name: opDescribeLimits, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeLimitsInput{} - } - - output = &DescribeLimitsOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// DescribeLimits API operation for Amazon DynamoDB. -// -// Returns the current provisioned-capacity quotas for your Amazon Web Services -// account in a Region, both for the Region as a whole and for any one DynamoDB -// table that you create there. -// -// When you establish an Amazon Web Services account, the account has initial -// quotas on the maximum read capacity units and write capacity units that you -// can provision across all of your DynamoDB tables in a given Region. Also, -// there are per-table quotas that apply when you create a table there. For -// more information, see Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) -// page in the Amazon DynamoDB Developer Guide. -// -// Although you can increase these quotas by filing a case at Amazon Web Services -// Support Center (https://console.aws.amazon.com/support/home#/), obtaining -// the increase is not instantaneous. The DescribeLimits action lets you write -// code to compare the capacity you are currently using to those quotas imposed -// by your account so that you have enough time to apply for an increase before -// you hit a quota. -// -// For example, you could use one of the Amazon Web Services SDKs to do the -// following: -// -// Call DescribeLimits for a particular Region to obtain your current account -// quotas on provisioned capacity there. -// -// Create a variable to hold the aggregate read capacity units provisioned for -// all your tables in that Region, and one to hold the aggregate write capacity -// units. Zero them both. -// -// Call ListTables to obtain a list of all your DynamoDB tables. -// -// For each table name listed by ListTables, do the following: -// -// - Call DescribeTable with the table name. -// -// - Use the data returned by DescribeTable to add the read capacity units -// and write capacity units provisioned for the table itself to your variables. -// -// - If the table has one or more global secondary indexes (GSIs), loop over -// these GSIs and add their provisioned capacity values to your variables -// as well. -// -// Report the account quotas for that Region returned by DescribeLimits, along -// with the total current provisioned capacity levels you have calculated. 
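// [Editor's note] A hedged sketch of the aggregation loop described above,
// assuming a configured *dynamodb.DynamoDB client named svc; error handling
// is trimmed to keep the shape visible:
//
//	limits, _ := svc.DescribeLimits(&dynamodb.DescribeLimitsInput{})
//	var reads, writes int64
//	_ = svc.ListTablesPages(&dynamodb.ListTablesInput{},
//		func(page *dynamodb.ListTablesOutput, lastPage bool) bool {
//			for _, name := range page.TableNames {
//				out, err := svc.DescribeTable(&dynamodb.DescribeTableInput{TableName: name})
//				if err != nil {
//					continue
//				}
//				if pt := out.Table.ProvisionedThroughput; pt != nil {
//					reads += aws.Int64Value(pt.ReadCapacityUnits)
//					writes += aws.Int64Value(pt.WriteCapacityUnits)
//				}
//				for _, gsi := range out.Table.GlobalSecondaryIndexes {
//					if pt := gsi.ProvisionedThroughput; pt != nil {
//						reads += aws.Int64Value(pt.ReadCapacityUnits)
//						writes += aws.Int64Value(pt.WriteCapacityUnits)
//					}
//				}
//			}
//			return true
//		})
//	fmt.Printf("reads %d/%d writes %d/%d\n",
//		reads, aws.Int64Value(limits.AccountMaxReadCapacityUnits),
//		writes, aws.Int64Value(limits.AccountMaxWriteCapacityUnits))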
-// -// This will let you see whether you are getting close to your account-level -// quotas. -// -// The per-table quotas apply only when you are creating a new table. They restrict -// the sum of the provisioned capacity of the new table itself and all its global -// secondary indexes. -// -// For existing tables and their GSIs, DynamoDB doesn't let you increase provisioned -// capacity extremely rapidly, but the only quota that applies is that the aggregate -// provisioned capacity over all your tables and GSIs cannot exceed either of -// the per-account quotas. -// -// DescribeLimits should only be called periodically. You can expect throttling -// errors if you call it more than once in a minute. -// -// The DescribeLimits Request element has no content. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation DescribeLimits for usage and error information. -// -// Returned Error Types: -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeLimits -func (c *DynamoDB) DescribeLimits(input *DescribeLimitsInput) (*DescribeLimitsOutput, error) { - req, out := c.DescribeLimitsRequest(input) - return out, req.Send() -} - -// DescribeLimitsWithContext is the same as DescribeLimits with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeLimits for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) DescribeLimitsWithContext(ctx aws.Context, input *DescribeLimitsInput, opts ...request.Option) (*DescribeLimitsOutput, error) { - req, out := c.DescribeLimitsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeTable = "DescribeTable" - -// DescribeTableRequest generates a "aws/request.Request" representing the -// client's request for the DescribeTable operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeTable for more information on using the DescribeTable -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DescribeTableRequest method. 
-// req, resp := client.DescribeTableRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTable -func (c *DynamoDB) DescribeTableRequest(input *DescribeTableInput) (req *request.Request, output *DescribeTableOutput) { - op := &request.Operation{ - Name: opDescribeTable, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeTableInput{} - } - - output = &DescribeTableOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// DescribeTable API operation for Amazon DynamoDB. -// -// Returns information about the table, including the current status of the -// table, when it was created, the primary key schema, and any indexes on the -// table. -// -// For global tables, this operation only applies to global tables using Version -// 2019.11.21 (Current version). -// -// If you issue a DescribeTable request immediately after a CreateTable request, -// DynamoDB might return a ResourceNotFoundException. This is because DescribeTable -// uses an eventually consistent query, and the metadata for your table might -// not be available at that moment. Wait for a few seconds, and then try the -// DescribeTable request again. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation DescribeTable for usage and error information. -// -// Returned Error Types: -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTable -func (c *DynamoDB) DescribeTable(input *DescribeTableInput) (*DescribeTableOutput, error) { - req, out := c.DescribeTableRequest(input) - return out, req.Send() -} - -// DescribeTableWithContext is the same as DescribeTable with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeTable for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) DescribeTableWithContext(ctx aws.Context, input *DescribeTableInput, opts ...request.Option) (*DescribeTableOutput, error) { - req, out := c.DescribeTableRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
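	// [Editor's note] At this point SetContext has attached the caller's ctx
	// and ApplyOptions has applied any per-request options; Send below runs
	// the HTTP call, and out is only safe to read when Send returns nil.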
- return out, req.Send() -} - -const opDescribeTableReplicaAutoScaling = "DescribeTableReplicaAutoScaling" - -// DescribeTableReplicaAutoScalingRequest generates a "aws/request.Request" representing the -// client's request for the DescribeTableReplicaAutoScaling operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeTableReplicaAutoScaling for more information on using the DescribeTableReplicaAutoScaling -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DescribeTableReplicaAutoScalingRequest method. -// req, resp := client.DescribeTableReplicaAutoScalingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTableReplicaAutoScaling -func (c *DynamoDB) DescribeTableReplicaAutoScalingRequest(input *DescribeTableReplicaAutoScalingInput) (req *request.Request, output *DescribeTableReplicaAutoScalingOutput) { - op := &request.Operation{ - Name: opDescribeTableReplicaAutoScaling, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeTableReplicaAutoScalingInput{} - } - - output = &DescribeTableReplicaAutoScalingOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeTableReplicaAutoScaling API operation for Amazon DynamoDB. -// -// Describes auto scaling settings across replicas of the global table at once. -// -// For global tables, this operation only applies to global tables using Version -// 2019.11.21 (Current version). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation DescribeTableReplicaAutoScaling for usage and error information. -// -// Returned Error Types: -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTableReplicaAutoScaling -func (c *DynamoDB) DescribeTableReplicaAutoScaling(input *DescribeTableReplicaAutoScalingInput) (*DescribeTableReplicaAutoScalingOutput, error) { - req, out := c.DescribeTableReplicaAutoScalingRequest(input) - return out, req.Send() -} - -// DescribeTableReplicaAutoScalingWithContext is the same as DescribeTableReplicaAutoScaling with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeTableReplicaAutoScaling for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *DynamoDB) DescribeTableReplicaAutoScalingWithContext(ctx aws.Context, input *DescribeTableReplicaAutoScalingInput, opts ...request.Option) (*DescribeTableReplicaAutoScalingOutput, error) { - req, out := c.DescribeTableReplicaAutoScalingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeTimeToLive = "DescribeTimeToLive" - -// DescribeTimeToLiveRequest generates a "aws/request.Request" representing the -// client's request for the DescribeTimeToLive operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeTimeToLive for more information on using the DescribeTimeToLive -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DescribeTimeToLiveRequest method. -// req, resp := client.DescribeTimeToLiveRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTimeToLive -func (c *DynamoDB) DescribeTimeToLiveRequest(input *DescribeTimeToLiveInput) (req *request.Request, output *DescribeTimeToLiveOutput) { - op := &request.Operation{ - Name: opDescribeTimeToLive, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeTimeToLiveInput{} - } - - output = &DescribeTimeToLiveOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// DescribeTimeToLive API operation for Amazon DynamoDB. -// -// Gives a description of the Time to Live (TTL) status on the specified table. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation DescribeTimeToLive for usage and error information. -// -// Returned Error Types: -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// - InternalServerError -// An error occurred on the server side. 
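// [Editor's note] A hedged sketch of reading a table's TTL status with the
// operation documented above; the table name is hypothetical:
//
//	out, err := svc.DescribeTimeToLive(&dynamodb.DescribeTimeToLiveInput{
//		TableName: aws.String("example-table"),
//	})
//	if err == nil && out.TimeToLiveDescription != nil {
//		fmt.Println(aws.StringValue(out.TimeToLiveDescription.TimeToLiveStatus))
//	}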
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTimeToLive -func (c *DynamoDB) DescribeTimeToLive(input *DescribeTimeToLiveInput) (*DescribeTimeToLiveOutput, error) { - req, out := c.DescribeTimeToLiveRequest(input) - return out, req.Send() -} - -// DescribeTimeToLiveWithContext is the same as DescribeTimeToLive with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeTimeToLive for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) DescribeTimeToLiveWithContext(ctx aws.Context, input *DescribeTimeToLiveInput, opts ...request.Option) (*DescribeTimeToLiveOutput, error) { - req, out := c.DescribeTimeToLiveRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDisableKinesisStreamingDestination = "DisableKinesisStreamingDestination" - -// DisableKinesisStreamingDestinationRequest generates a "aws/request.Request" representing the -// client's request for the DisableKinesisStreamingDestination operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DisableKinesisStreamingDestination for more information on using the DisableKinesisStreamingDestination -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DisableKinesisStreamingDestinationRequest method. -// req, resp := client.DisableKinesisStreamingDestinationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DisableKinesisStreamingDestination -func (c *DynamoDB) DisableKinesisStreamingDestinationRequest(input *DisableKinesisStreamingDestinationInput) (req *request.Request, output *DisableKinesisStreamingDestinationOutput) { - op := &request.Operation{ - Name: opDisableKinesisStreamingDestination, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DisableKinesisStreamingDestinationInput{} - } - - output = &DisableKinesisStreamingDestinationOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// DisableKinesisStreamingDestination API operation for Amazon DynamoDB. 
-// -// Stops replication from the DynamoDB table to the Kinesis data stream. This -// is done without deleting either of the resources. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation DisableKinesisStreamingDestination for usage and error information. -// -// Returned Error Types: -// -// - InternalServerError -// An error occurred on the server side. -// -// - LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. -// -// For most purposes, up to 500 simultaneous table operations are allowed per -// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, -// RestoreTableFromBackup, and RestoreTableToPointInTime. -// -// When you are creating a table with one or more secondary indexes, you can -// have up to 250 such requests running at a time. However, if the table or -// index specifications are complex, then DynamoDB might temporarily reduce -// the number of concurrent operations. -// -// When importing into DynamoDB, up to 50 simultaneous import table operations -// are allowed per account. -// -// There is a soft account quota of 2,500 tables. -// -// GetRecords was called with a value of more than 1000 for the limit request -// parameter. -// -// More than 2 processes are reading from the same streams shard at the same -// time. Exceeding this limit may result in request throttling. -// -// - ResourceInUseException -// The operation conflicts with the resource's availability. For example, you -// attempted to recreate an existing table, or tried to delete a table currently -// in the CREATING state. -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DisableKinesisStreamingDestination -func (c *DynamoDB) DisableKinesisStreamingDestination(input *DisableKinesisStreamingDestinationInput) (*DisableKinesisStreamingDestinationOutput, error) { - req, out := c.DisableKinesisStreamingDestinationRequest(input) - return out, req.Send() -} - -// DisableKinesisStreamingDestinationWithContext is the same as DisableKinesisStreamingDestination with the addition of -// the ability to pass a context and additional request options. -// -// See DisableKinesisStreamingDestination for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) DisableKinesisStreamingDestinationWithContext(ctx aws.Context, input *DisableKinesisStreamingDestinationInput, opts ...request.Option) (*DisableKinesisStreamingDestinationOutput, error) { - req, out := c.DisableKinesisStreamingDestinationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opEnableKinesisStreamingDestination = "EnableKinesisStreamingDestination" - -// EnableKinesisStreamingDestinationRequest generates a "aws/request.Request" representing the -// client's request for the EnableKinesisStreamingDestination operation. 
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See EnableKinesisStreamingDestination for more information on using the EnableKinesisStreamingDestination -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the EnableKinesisStreamingDestinationRequest method. -// req, resp := client.EnableKinesisStreamingDestinationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/EnableKinesisStreamingDestination -func (c *DynamoDB) EnableKinesisStreamingDestinationRequest(input *EnableKinesisStreamingDestinationInput) (req *request.Request, output *EnableKinesisStreamingDestinationOutput) { - op := &request.Operation{ - Name: opEnableKinesisStreamingDestination, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &EnableKinesisStreamingDestinationInput{} - } - - output = &EnableKinesisStreamingDestinationOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// EnableKinesisStreamingDestination API operation for Amazon DynamoDB. -// -// Starts table data replication to the specified Kinesis data stream at a timestamp -// chosen during the enable workflow. If this operation doesn't return results -// immediately, use DescribeKinesisStreamingDestination to check if streaming -// to the Kinesis data stream is ACTIVE. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation EnableKinesisStreamingDestination for usage and error information. -// -// Returned Error Types: -// -// - InternalServerError -// An error occurred on the server side. -// -// - LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. -// -// For most purposes, up to 500 simultaneous table operations are allowed per -// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, -// RestoreTableFromBackup, and RestoreTableToPointInTime. -// -// When you are creating a table with one or more secondary indexes, you can -// have up to 250 such requests running at a time. However, if the table or -// index specifications are complex, then DynamoDB might temporarily reduce -// the number of concurrent operations. 
-// -// When importing into DynamoDB, up to 50 simultaneous import table operations -// are allowed per account. -// -// There is a soft account quota of 2,500 tables. -// -// GetRecords was called with a value of more than 1000 for the limit request -// parameter. -// -// More than 2 processes are reading from the same streams shard at the same -// time. Exceeding this limit may result in request throttling. -// -// - ResourceInUseException -// The operation conflicts with the resource's availability. For example, you -// attempted to recreate an existing table, or tried to delete a table currently -// in the CREATING state. -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/EnableKinesisStreamingDestination -func (c *DynamoDB) EnableKinesisStreamingDestination(input *EnableKinesisStreamingDestinationInput) (*EnableKinesisStreamingDestinationOutput, error) { - req, out := c.EnableKinesisStreamingDestinationRequest(input) - return out, req.Send() -} - -// EnableKinesisStreamingDestinationWithContext is the same as EnableKinesisStreamingDestination with the addition of -// the ability to pass a context and additional request options. -// -// See EnableKinesisStreamingDestination for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) EnableKinesisStreamingDestinationWithContext(ctx aws.Context, input *EnableKinesisStreamingDestinationInput, opts ...request.Option) (*EnableKinesisStreamingDestinationOutput, error) { - req, out := c.EnableKinesisStreamingDestinationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opExecuteStatement = "ExecuteStatement" - -// ExecuteStatementRequest generates a "aws/request.Request" representing the -// client's request for the ExecuteStatement operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ExecuteStatement for more information on using the ExecuteStatement -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ExecuteStatementRequest method. 
-// req, resp := client.ExecuteStatementRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExecuteStatement -func (c *DynamoDB) ExecuteStatementRequest(input *ExecuteStatementInput) (req *request.Request, output *ExecuteStatementOutput) { - op := &request.Operation{ - Name: opExecuteStatement, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ExecuteStatementInput{} - } - - output = &ExecuteStatementOutput{} - req = c.newRequest(op, input, output) - return -} - -// ExecuteStatement API operation for Amazon DynamoDB. -// -// This operation allows you to perform reads and singleton writes on data stored -// in DynamoDB, using PartiQL. -// -// For PartiQL reads (SELECT statement), if the total number of processed items -// exceeds the maximum dataset size limit of 1 MB, the read stops and results -// are returned to the user as a LastEvaluatedKey value to continue the read -// in a subsequent operation. If the filter criteria in WHERE clause does not -// match any data, the read will return an empty result set. -// -// A single SELECT statement response can return up to the maximum number of -// items (if using the Limit parameter) or a maximum of 1 MB of data (and then -// apply any filtering to the results using WHERE clause). If LastEvaluatedKey -// is present in the response, you need to paginate the result set. If NextToken -// is present, you need to paginate the result set and include NextToken. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation ExecuteStatement for usage and error information. -// -// Returned Error Types: -// -// - ConditionalCheckFailedException -// A condition specified in the operation could not be evaluated. -// -// - ProvisionedThroughputExceededException -// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB -// automatically retry requests that receive this exception. Your request is -// eventually successful, unless your retry queue is too large to finish. Reduce -// the frequency of requests and use exponential backoff. For more information, -// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) -// in the Amazon DynamoDB Developer Guide. -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// - ItemCollectionSizeLimitExceededException -// An item collection is too large. This exception is only returned for tables -// that have one or more local secondary indexes. -// -// - TransactionConflictException -// Operation was rejected because there is an ongoing transaction for the item. -// -// - RequestLimitExceeded -// Throughput exceeds the current throughput quota for your account. Please -// contact Amazon Web Services Support (https://aws.amazon.com/support) to request -// a quota increase. -// -// - InternalServerError -// An error occurred on the server side. 
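// Editor's illustrative sketch (not part of the removed vendored source): a
// parameterized PartiQL read with the ExecuteStatement operation described
// above. The table name and parameter value are hypothetical.
//
//	out, err := svc.ExecuteStatement(&dynamodb.ExecuteStatementInput{
//		Statement:  aws.String(`SELECT * FROM "Music" WHERE Artist = ?`),
//		Parameters: []*dynamodb.AttributeValue{{S: aws.String("No One You Know")}},
//	})
//	// When out.NextToken is set, repeat the call with that NextToken to
//	// continue the read past the 1 MB limit.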
-// -// - DuplicateItemException -// There was an attempt to insert an item with the same primary key as an item -// that already exists in the DynamoDB table. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExecuteStatement -func (c *DynamoDB) ExecuteStatement(input *ExecuteStatementInput) (*ExecuteStatementOutput, error) { - req, out := c.ExecuteStatementRequest(input) - return out, req.Send() -} - -// ExecuteStatementWithContext is the same as ExecuteStatement with the addition of -// the ability to pass a context and additional request options. -// -// See ExecuteStatement for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) ExecuteStatementWithContext(ctx aws.Context, input *ExecuteStatementInput, opts ...request.Option) (*ExecuteStatementOutput, error) { - req, out := c.ExecuteStatementRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opExecuteTransaction = "ExecuteTransaction" - -// ExecuteTransactionRequest generates a "aws/request.Request" representing the -// client's request for the ExecuteTransaction operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ExecuteTransaction for more information on using the ExecuteTransaction -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ExecuteTransactionRequest method. -// req, resp := client.ExecuteTransactionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExecuteTransaction -func (c *DynamoDB) ExecuteTransactionRequest(input *ExecuteTransactionInput) (req *request.Request, output *ExecuteTransactionOutput) { - op := &request.Operation{ - Name: opExecuteTransaction, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ExecuteTransactionInput{} - } - - output = &ExecuteTransactionOutput{} - req = c.newRequest(op, input, output) - return -} - -// ExecuteTransaction API operation for Amazon DynamoDB. -// -// This operation allows you to perform transactional reads or writes on data -// stored in DynamoDB, using PartiQL. -// -// The entire transaction must consist of either read statements or write statements, -// you cannot mix both in one transaction. The EXISTS function is an exception -// and can be used to check the condition of specific attributes of the item -// in a similar manner to ConditionCheck in the TransactWriteItems (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/transaction-apis.html#transaction-apis-txwriteitems) -// API. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
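// Editor's illustrative sketch (not part of the removed vendored source): an
// all-write PartiQL transaction via the ExecuteTransaction operation described
// above (read and write statements cannot be mixed). The statements are
// hypothetical.
//
//	_, err := svc.ExecuteTransaction(&dynamodb.ExecuteTransactionInput{
//		TransactStatements: []*dynamodb.ParameterizedStatement{
//			{Statement: aws.String(`UPDATE "Music" SET AwardsWon = 1 WHERE Artist = 'X' AND SongTitle = 'Y'`)},
//			{Statement: aws.String(`INSERT INTO "PlayLog" VALUE {'Artist': 'X', 'SongTitle': 'Y'}`)},
//		},
//	})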
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation ExecuteTransaction for usage and error information.
-//
-// Returned Error Types:
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - TransactionCanceledException
-// The entire transaction request was canceled.
-//
-// DynamoDB cancels a TransactWriteItems request under the following circumstances:
-//
-// - A condition in one of the condition expressions is not met.
-//
-// - A table in the TransactWriteItems request is in a different account
-// or region.
-//
-// - More than one action in the TransactWriteItems operation targets the
-// same item.
-//
-// - There is insufficient provisioned capacity for the transaction to be
-// completed.
-//
-// - An item size becomes too large (larger than 400 KB), or a local secondary
-// index (LSI) becomes too large, or a similar validation error occurs because
-// of changes made by the transaction.
-//
-// - There is a user error, such as an invalid data format.
-//
-// - There is an ongoing TransactWriteItems operation that conflicts with
-// a concurrent TransactWriteItems request. In this case the TransactWriteItems
-// operation fails with a TransactionCanceledException.
-//
-// DynamoDB cancels a TransactGetItems request under the following circumstances:
-//
-// - There is an ongoing TransactGetItems operation that conflicts with a
-// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request.
-// In this case the TransactGetItems operation fails with a TransactionCanceledException.
-//
-// - A table in the TransactGetItems request is in a different account or
-// region.
-//
-// - There is insufficient provisioned capacity for the transaction to be
-// completed.
-//
-// - There is a user error, such as an invalid data format.
-//
-// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons
-// property. This property is not set for other languages. Transaction cancellation
-// reasons are ordered in the order of requested items; if an item has no error,
-// it will have a None code and a Null message.
-//
-// Cancellation reason codes and possible error messages:
-//
-// - No Errors: Code: None Message: null
-//
-// - Conditional Check Failed: Code: ConditionalCheckFailed Message: The
-// conditional request failed.
-//
-// - Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded
-// Message: Collection size exceeded.
-//
-// - Transaction Conflict: Code: TransactionConflict Message: Transaction
-// is ongoing for the item.
-//
-// - Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded
-// Messages: The level of configured provisioned throughput for the table
-// was exceeded. Consider increasing your provisioning level with the UpdateTable
-// API. This message is returned when provisioned throughput is exceeded
-// on a provisioned DynamoDB table. The level of configured provisioned
-// throughput for one or more global secondary indexes of the table was exceeded.
-// Consider increasing your provisioning level for the under-provisioned
-// global secondary indexes with the UpdateTable API. This message is returned
-// when provisioned throughput is exceeded on a provisioned GSI.
-//
-// - Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
-// the current capacity of your table or index.
DynamoDB is automatically -// scaling your table or index so please try again shortly. If exceptions -// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html. -// This message is returned when writes get throttled on an On-Demand table -// as DynamoDB is automatically scaling the table. Throughput exceeds the -// current capacity for one or more global secondary indexes. DynamoDB is -// automatically scaling your index so please try again shortly. This message -// is returned when writes get throttled on an On-Demand GSI as DynamoDB -// is automatically scaling the GSI. -// -// - Validation Error: Code: ValidationError Messages: One or more parameter -// values were invalid. The update expression attempted to update the secondary -// index key beyond allowed size limits. The update expression attempted -// to update the secondary index key to unsupported type. An operand in the -// update expression has an incorrect data type. Item size to update has -// exceeded the maximum allowed size. Number overflow. Attempting to store -// a number with magnitude larger than supported range. Type mismatch for -// attribute to update. Nesting Levels have exceeded supported limits. The -// document path provided in the update expression is invalid for update. -// The provided expression refers to an attribute that does not exist in -// the item. -// -// - TransactionInProgressException -// The transaction with the given request token is already in progress. -// -// Recommended Settings -// -// This is a general recommendation for handling the TransactionInProgressException. -// These settings help ensure that the client retries will trigger completion -// of the ongoing TransactWriteItems request. -// -// - Set clientExecutionTimeout to a value that allows at least one retry -// to be processed after 5 seconds have elapsed since the first attempt for -// the TransactWriteItems operation. -// -// - Set socketTimeout to a value a little lower than the requestTimeout -// setting. -// -// - requestTimeout should be set based on the time taken for the individual -// retries of a single HTTP request for your use case, but setting it to -// 1 second or higher should work well to reduce chances of retries and TransactionInProgressException -// errors. -// -// - Use exponential backoff when retrying and tune backoff if needed. -// -// Assuming default retry policy (https://github.com/aws/aws-sdk-java/blob/fd409dee8ae23fb8953e0bb4dbde65536a7e0514/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L97), -// example timeout settings based on the guidelines above are as follows: -// -// Example timeline: -// -// - 0-1000 first attempt -// -// - 1000-1500 first sleep/delay (default retry policy uses 500 ms as base -// delay for 4xx errors) -// -// - 1500-2500 second attempt -// -// - 2500-3500 second sleep/delay (500 * 2, exponential backoff) -// -// - 3500-4500 third attempt -// -// - 4500-6500 third sleep/delay (500 * 2^2) -// -// - 6500-7500 fourth attempt (this can trigger inline recovery since 5 seconds -// have elapsed since the first attempt reached TC) -// -// - IdempotentParameterMismatchException -// DynamoDB rejected the request because you retried a request with a different -// payload but with an idempotent token that was already used. -// -// - ProvisionedThroughputExceededException -// Your request rate is too high. 
The Amazon Web Services SDKs for DynamoDB -// automatically retry requests that receive this exception. Your request is -// eventually successful, unless your retry queue is too large to finish. Reduce -// the frequency of requests and use exponential backoff. For more information, -// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) -// in the Amazon DynamoDB Developer Guide. -// -// - RequestLimitExceeded -// Throughput exceeds the current throughput quota for your account. Please -// contact Amazon Web Services Support (https://aws.amazon.com/support) to request -// a quota increase. -// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExecuteTransaction -func (c *DynamoDB) ExecuteTransaction(input *ExecuteTransactionInput) (*ExecuteTransactionOutput, error) { - req, out := c.ExecuteTransactionRequest(input) - return out, req.Send() -} - -// ExecuteTransactionWithContext is the same as ExecuteTransaction with the addition of -// the ability to pass a context and additional request options. -// -// See ExecuteTransaction for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) ExecuteTransactionWithContext(ctx aws.Context, input *ExecuteTransactionInput, opts ...request.Option) (*ExecuteTransactionOutput, error) { - req, out := c.ExecuteTransactionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opExportTableToPointInTime = "ExportTableToPointInTime" - -// ExportTableToPointInTimeRequest generates a "aws/request.Request" representing the -// client's request for the ExportTableToPointInTime operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ExportTableToPointInTime for more information on using the ExportTableToPointInTime -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ExportTableToPointInTimeRequest method. -// req, resp := client.ExportTableToPointInTimeRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExportTableToPointInTime -func (c *DynamoDB) ExportTableToPointInTimeRequest(input *ExportTableToPointInTimeInput) (req *request.Request, output *ExportTableToPointInTimeOutput) { - op := &request.Operation{ - Name: opExportTableToPointInTime, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ExportTableToPointInTimeInput{} - } - - output = &ExportTableToPointInTimeOutput{} - req = c.newRequest(op, input, output) - return -} - -// ExportTableToPointInTime API operation for Amazon DynamoDB. 
-// -// Exports table data to an S3 bucket. The table must have point in time recovery -// enabled, and you can export data from any time within the point in time recovery -// window. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation ExportTableToPointInTime for usage and error information. -// -// Returned Error Types: -// -// - TableNotFoundException -// A source table with the name TableName does not currently exist within the -// subscriber's account or the subscriber is operating in the wrong Amazon Web -// Services Region. -// -// - PointInTimeRecoveryUnavailableException -// Point in time recovery has not yet been enabled for this source table. -// -// - LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. -// -// For most purposes, up to 500 simultaneous table operations are allowed per -// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, -// RestoreTableFromBackup, and RestoreTableToPointInTime. -// -// When you are creating a table with one or more secondary indexes, you can -// have up to 250 such requests running at a time. However, if the table or -// index specifications are complex, then DynamoDB might temporarily reduce -// the number of concurrent operations. -// -// When importing into DynamoDB, up to 50 simultaneous import table operations -// are allowed per account. -// -// There is a soft account quota of 2,500 tables. -// -// GetRecords was called with a value of more than 1000 for the limit request -// parameter. -// -// More than 2 processes are reading from the same streams shard at the same -// time. Exceeding this limit may result in request throttling. -// -// - InvalidExportTimeException -// The specified ExportTime is outside of the point in time recovery window. -// -// - ExportConflictException -// There was a conflict when writing to the specified S3 bucket. -// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExportTableToPointInTime -func (c *DynamoDB) ExportTableToPointInTime(input *ExportTableToPointInTimeInput) (*ExportTableToPointInTimeOutput, error) { - req, out := c.ExportTableToPointInTimeRequest(input) - return out, req.Send() -} - -// ExportTableToPointInTimeWithContext is the same as ExportTableToPointInTime with the addition of -// the ability to pass a context and additional request options. -// -// See ExportTableToPointInTime for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) ExportTableToPointInTimeWithContext(ctx aws.Context, input *ExportTableToPointInTimeInput, opts ...request.Option) (*ExportTableToPointInTimeOutput, error) { - req, out := c.ExportTableToPointInTimeRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetItem = "GetItem" - -// GetItemRequest generates a "aws/request.Request" representing the -// client's request for the GetItem operation. 
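// Editor's illustrative sketch (not part of the removed vendored source):
// requesting an export with the ExportTableToPointInTime operation documented
// above. The table ARN and bucket are hypothetical; ExportTime is optional.
//
//	out, err := svc.ExportTableToPointInTime(&dynamodb.ExportTableToPointInTimeInput{
//		TableArn:     aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/Music"),
//		S3Bucket:     aws.String("my-export-bucket"),
//		ExportFormat: aws.String(dynamodb.ExportFormatDynamodbJson),
//	})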
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetItem for more information on using the GetItem -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetItemRequest method. -// req, resp := client.GetItemRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GetItem -func (c *DynamoDB) GetItemRequest(input *GetItemInput) (req *request.Request, output *GetItemOutput) { - op := &request.Operation{ - Name: opGetItem, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetItemInput{} - } - - output = &GetItemOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// GetItem API operation for Amazon DynamoDB. -// -// The GetItem operation returns a set of attributes for the item with the given -// primary key. If there is no matching item, GetItem does not return any data -// and there will be no Item element in the response. -// -// GetItem provides an eventually consistent read by default. If your application -// requires a strongly consistent read, set ConsistentRead to true. Although -// a strongly consistent read might take more time than an eventually consistent -// read, it always returns the last updated value. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation GetItem for usage and error information. -// -// Returned Error Types: -// -// - ProvisionedThroughputExceededException -// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB -// automatically retry requests that receive this exception. Your request is -// eventually successful, unless your retry queue is too large to finish. Reduce -// the frequency of requests and use exponential backoff. For more information, -// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) -// in the Amazon DynamoDB Developer Guide. -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. 
-// -// - RequestLimitExceeded -// Throughput exceeds the current throughput quota for your account. Please -// contact Amazon Web Services Support (https://aws.amazon.com/support) to request -// a quota increase. -// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GetItem -func (c *DynamoDB) GetItem(input *GetItemInput) (*GetItemOutput, error) { - req, out := c.GetItemRequest(input) - return out, req.Send() -} - -// GetItemWithContext is the same as GetItem with the addition of -// the ability to pass a context and additional request options. -// -// See GetItem for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) GetItemWithContext(ctx aws.Context, input *GetItemInput, opts ...request.Option) (*GetItemOutput, error) { - req, out := c.GetItemRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetResourcePolicy = "GetResourcePolicy" - -// GetResourcePolicyRequest generates a "aws/request.Request" representing the -// client's request for the GetResourcePolicy operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetResourcePolicy for more information on using the GetResourcePolicy -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetResourcePolicyRequest method. -// req, resp := client.GetResourcePolicyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GetResourcePolicy -func (c *DynamoDB) GetResourcePolicyRequest(input *GetResourcePolicyInput) (req *request.Request, output *GetResourcePolicyOutput) { - op := &request.Operation{ - Name: opGetResourcePolicy, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetResourcePolicyInput{} - } - - output = &GetResourcePolicyOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// GetResourcePolicy API operation for Amazon DynamoDB. -// -// Returns the resource-based policy document attached to the resource, which -// can be a table or stream, in JSON format. 
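// Editor's illustrative sketch (not part of the removed vendored source): a
// strongly consistent read with the GetItem operation described above. The
// table, key schema, and values are hypothetical.
//
//	out, err := svc.GetItem(&dynamodb.GetItemInput{
//		TableName: aws.String("Music"),
//		Key: map[string]*dynamodb.AttributeValue{
//			"Artist":    {S: aws.String("No One You Know")},
//			"SongTitle": {S: aws.String("Call Me Today")},
//		},
//		ConsistentRead: aws.Bool(true),
//	})
//	if err == nil && out.Item == nil {
//		// No matching item: the response simply has no Item element.
//	}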
-//
-// GetResourcePolicy follows an eventually consistent (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html)
-// model. The following list describes the outcomes when you issue the GetResourcePolicy
-// request immediately after issuing another request:
-//
-// - If you issue a GetResourcePolicy request immediately after a PutResourcePolicy
-// request, DynamoDB might return a PolicyNotFoundException.
-//
-// - If you issue a GetResourcePolicy request immediately after a DeleteResourcePolicy
-// request, DynamoDB might return the policy that was present before the
-// deletion request.
-//
-// - If you issue a GetResourcePolicy request immediately after a CreateTable
-// request, which includes a resource-based policy, DynamoDB might return
-// a ResourceNotFoundException or a PolicyNotFoundException.
-//
-// Because GetResourcePolicy uses an eventually consistent query, the metadata
-// for your policy or table might not be available at that moment. Wait for
-// a few seconds, and then retry the GetResourcePolicy request.
-//
-// After a GetResourcePolicy request returns a policy created using the PutResourcePolicy
-// request, the policy will be applied in the authorization of requests to the
-// resource. Because this process is eventually consistent, it will take some
-// time to apply the policy to all requests to a resource. Policies that you
-// attach while creating a table using the CreateTable request will always be
-// applied to all requests for that table.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation GetResourcePolicy for usage and error information.
-//
-// Returned Error Types:
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// - PolicyNotFoundException
-// The operation tried to access a nonexistent resource-based policy.
-//
-// If you specified an ExpectedRevisionId, it's possible that a policy is present
-// for the resource but its revision ID didn't match the expected value.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GetResourcePolicy
-func (c *DynamoDB) GetResourcePolicy(input *GetResourcePolicyInput) (*GetResourcePolicyOutput, error) {
-	req, out := c.GetResourcePolicyRequest(input)
-	return out, req.Send()
-}
-
-// GetResourcePolicyWithContext is the same as GetResourcePolicy with the addition of
-// the ability to pass a context and additional request options.
-//
-// See GetResourcePolicy for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) GetResourcePolicyWithContext(ctx aws.Context, input *GetResourcePolicyInput, opts ...request.Option) (*GetResourcePolicyOutput, error) {
-	req, out := c.GetResourcePolicyRequest(input)
-	req.SetContext(ctx)
-	req.ApplyOptions(opts...)
- return out, req.Send() -} - -const opImportTable = "ImportTable" - -// ImportTableRequest generates a "aws/request.Request" representing the -// client's request for the ImportTable operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ImportTable for more information on using the ImportTable -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ImportTableRequest method. -// req, resp := client.ImportTableRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ImportTable -func (c *DynamoDB) ImportTableRequest(input *ImportTableInput) (req *request.Request, output *ImportTableOutput) { - op := &request.Operation{ - Name: opImportTable, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ImportTableInput{} - } - - output = &ImportTableOutput{} - req = c.newRequest(op, input, output) - return -} - -// ImportTable API operation for Amazon DynamoDB. -// -// Imports table data from an S3 bucket. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation ImportTable for usage and error information. -// -// Returned Error Types: -// -// - ResourceInUseException -// The operation conflicts with the resource's availability. For example, you -// attempted to recreate an existing table, or tried to delete a table currently -// in the CREATING state. -// -// - LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. -// -// For most purposes, up to 500 simultaneous table operations are allowed per -// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, -// RestoreTableFromBackup, and RestoreTableToPointInTime. -// -// When you are creating a table with one or more secondary indexes, you can -// have up to 250 such requests running at a time. However, if the table or -// index specifications are complex, then DynamoDB might temporarily reduce -// the number of concurrent operations. -// -// When importing into DynamoDB, up to 50 simultaneous import table operations -// are allowed per account. -// -// There is a soft account quota of 2,500 tables. -// -// GetRecords was called with a value of more than 1000 for the limit request -// parameter. -// -// More than 2 processes are reading from the same streams shard at the same -// time. Exceeding this limit may result in request throttling. -// -// - ImportConflictException -// There was a conflict when importing from the specified S3 source. This can -// occur when the current import conflicts with a previous import request that -// had the same client token. 
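// Editor's illustrative sketch (not part of the removed vendored source):
// importing CSV data with the ImportTable operation described above. The
// bucket, table name, and key schema are hypothetical.
//
//	_, err := svc.ImportTable(&dynamodb.ImportTableInput{
//		S3BucketSource: &dynamodb.S3BucketSource{S3Bucket: aws.String("my-import-bucket")},
//		InputFormat:    aws.String(dynamodb.InputFormatCsv),
//		TableCreationParameters: &dynamodb.TableCreationParameters{
//			TableName:   aws.String("ImportedTable"),
//			BillingMode: aws.String(dynamodb.BillingModePayPerRequest),
//			AttributeDefinitions: []*dynamodb.AttributeDefinition{
//				{AttributeName: aws.String("PK"), AttributeType: aws.String("S")},
//			},
//			KeySchema: []*dynamodb.KeySchemaElement{
//				{AttributeName: aws.String("PK"), KeyType: aws.String("HASH")},
//			},
//		},
//	})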
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ImportTable -func (c *DynamoDB) ImportTable(input *ImportTableInput) (*ImportTableOutput, error) { - req, out := c.ImportTableRequest(input) - return out, req.Send() -} - -// ImportTableWithContext is the same as ImportTable with the addition of -// the ability to pass a context and additional request options. -// -// See ImportTable for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) ImportTableWithContext(ctx aws.Context, input *ImportTableInput, opts ...request.Option) (*ImportTableOutput, error) { - req, out := c.ImportTableRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListBackups = "ListBackups" - -// ListBackupsRequest generates a "aws/request.Request" representing the -// client's request for the ListBackups operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListBackups for more information on using the ListBackups -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListBackupsRequest method. -// req, resp := client.ListBackupsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListBackups -func (c *DynamoDB) ListBackupsRequest(input *ListBackupsInput) (req *request.Request, output *ListBackupsOutput) { - op := &request.Operation{ - Name: opListBackups, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ListBackupsInput{} - } - - output = &ListBackupsOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// ListBackups API operation for Amazon DynamoDB. -// -// List DynamoDB backups that are associated with an Amazon Web Services account -// and weren't made with Amazon Web Services Backup. To list these backups for -// a given table, specify TableName. ListBackups returns a paginated list of -// results with at most 1 MB worth of items in a page. You can also specify -// a maximum number of entries to be returned in a page. -// -// In the request, start time is inclusive, but end time is exclusive. 
Note -// that these boundaries are for the time at which the original backup was requested. -// -// You can call ListBackups a maximum of five times per second. -// -// If you want to retrieve the complete list of backups made with Amazon Web -// Services Backup, use the Amazon Web Services Backup list API. (https://docs.aws.amazon.com/aws-backup/latest/devguide/API_ListBackupJobs.html) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation ListBackups for usage and error information. -// -// Returned Error Types: -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListBackups -func (c *DynamoDB) ListBackups(input *ListBackupsInput) (*ListBackupsOutput, error) { - req, out := c.ListBackupsRequest(input) - return out, req.Send() -} - -// ListBackupsWithContext is the same as ListBackups with the addition of -// the ability to pass a context and additional request options. -// -// See ListBackups for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) ListBackupsWithContext(ctx aws.Context, input *ListBackupsInput, opts ...request.Option) (*ListBackupsOutput, error) { - req, out := c.ListBackupsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListContributorInsights = "ListContributorInsights" - -// ListContributorInsightsRequest generates a "aws/request.Request" representing the -// client's request for the ListContributorInsights operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListContributorInsights for more information on using the ListContributorInsights -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListContributorInsightsRequest method. 
-// req, resp := client.ListContributorInsightsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListContributorInsights -func (c *DynamoDB) ListContributorInsightsRequest(input *ListContributorInsightsInput) (req *request.Request, output *ListContributorInsightsOutput) { - op := &request.Operation{ - Name: opListContributorInsights, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListContributorInsightsInput{} - } - - output = &ListContributorInsightsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListContributorInsights API operation for Amazon DynamoDB. -// -// Returns a list of ContributorInsightsSummary for a table and all its global -// secondary indexes. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation ListContributorInsights for usage and error information. -// -// Returned Error Types: -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListContributorInsights -func (c *DynamoDB) ListContributorInsights(input *ListContributorInsightsInput) (*ListContributorInsightsOutput, error) { - req, out := c.ListContributorInsightsRequest(input) - return out, req.Send() -} - -// ListContributorInsightsWithContext is the same as ListContributorInsights with the addition of -// the ability to pass a context and additional request options. -// -// See ListContributorInsights for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) ListContributorInsightsWithContext(ctx aws.Context, input *ListContributorInsightsInput, opts ...request.Option) (*ListContributorInsightsOutput, error) { - req, out := c.ListContributorInsightsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListContributorInsightsPages iterates over the pages of a ListContributorInsights operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListContributorInsights method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListContributorInsights operation. 
-// pageNum := 0 -// err := client.ListContributorInsightsPages(params, -// func(page *dynamodb.ListContributorInsightsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *DynamoDB) ListContributorInsightsPages(input *ListContributorInsightsInput, fn func(*ListContributorInsightsOutput, bool) bool) error { - return c.ListContributorInsightsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListContributorInsightsPagesWithContext same as ListContributorInsightsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) ListContributorInsightsPagesWithContext(ctx aws.Context, input *ListContributorInsightsInput, fn func(*ListContributorInsightsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListContributorInsightsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListContributorInsightsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListContributorInsightsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListExports = "ListExports" - -// ListExportsRequest generates a "aws/request.Request" representing the -// client's request for the ListExports operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListExports for more information on using the ListExports -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListExportsRequest method. -// req, resp := client.ListExportsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListExports -func (c *DynamoDB) ListExportsRequest(input *ListExportsInput) (req *request.Request, output *ListExportsOutput) { - op := &request.Operation{ - Name: opListExports, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListExportsInput{} - } - - output = &ListExportsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListExports API operation for Amazon DynamoDB. -// -// Lists completed exports within the past 90 days. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation ListExports for usage and error information. 
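
For orientation while reading these deletions: the Pages helpers above were the intended way to consume the NextToken paginator, rather than threading tokens by hand. A minimal usage sketch, assuming credentials and region come from the environment (the summary handling is illustrative):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/dynamodb"
    )

    func main() {
        // Shared session; credentials and region come from the environment.
        svc := dynamodb.New(session.Must(session.NewSession()))

        // Collect every ContributorInsightsSummary across all pages; the SDK
        // re-issues the request with NextToken until the token is absent.
        var summaries []*dynamodb.ContributorInsightsSummary
        err := svc.ListContributorInsightsPages(&dynamodb.ListContributorInsightsInput{},
            func(page *dynamodb.ListContributorInsightsOutput, lastPage bool) bool {
                summaries = append(summaries, page.ContributorInsightsSummaries...)
                return true // keep paginating until lastPage
            })
        if err != nil {
            fmt.Println("ListContributorInsights failed:", err)
            return
        }
        fmt.Println("summaries:", len(summaries))
    }
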
-// -// Returned Error Types: -// -// - LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. -// -// For most purposes, up to 500 simultaneous table operations are allowed per -// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, -// RestoreTableFromBackup, and RestoreTableToPointInTime. -// -// When you are creating a table with one or more secondary indexes, you can -// have up to 250 such requests running at a time. However, if the table or -// index specifications are complex, then DynamoDB might temporarily reduce -// the number of concurrent operations. -// -// When importing into DynamoDB, up to 50 simultaneous import table operations -// are allowed per account. -// -// There is a soft account quota of 2,500 tables. -// -// GetRecords was called with a value of more than 1000 for the limit request -// parameter. -// -// More than 2 processes are reading from the same streams shard at the same -// time. Exceeding this limit may result in request throttling. -// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListExports -func (c *DynamoDB) ListExports(input *ListExportsInput) (*ListExportsOutput, error) { - req, out := c.ListExportsRequest(input) - return out, req.Send() -} - -// ListExportsWithContext is the same as ListExports with the addition of -// the ability to pass a context and additional request options. -// -// See ListExports for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) ListExportsWithContext(ctx aws.Context, input *ListExportsInput, opts ...request.Option) (*ListExportsOutput, error) { - req, out := c.ListExportsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListExportsPages iterates over the pages of a ListExports operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListExports method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListExports operation. -// pageNum := 0 -// err := client.ListExportsPages(params, -// func(page *dynamodb.ListExportsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *DynamoDB) ListExportsPages(input *ListExportsInput, fn func(*ListExportsOutput, bool) bool) error { - return c.ListExportsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListExportsPagesWithContext same as ListExportsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *DynamoDB) ListExportsPagesWithContext(ctx aws.Context, input *ListExportsInput, fn func(*ListExportsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListExportsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListExportsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListExportsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListGlobalTables = "ListGlobalTables" - -// ListGlobalTablesRequest generates a "aws/request.Request" representing the -// client's request for the ListGlobalTables operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListGlobalTables for more information on using the ListGlobalTables -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListGlobalTablesRequest method. -// req, resp := client.ListGlobalTablesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTables -func (c *DynamoDB) ListGlobalTablesRequest(input *ListGlobalTablesInput) (req *request.Request, output *ListGlobalTablesOutput) { - op := &request.Operation{ - Name: opListGlobalTables, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ListGlobalTablesInput{} - } - - output = &ListGlobalTablesOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// ListGlobalTables API operation for Amazon DynamoDB. -// -// Lists all global tables that have a replica in the specified Region. -// -// This documentation is for version 2017.11.29 (Legacy) of global tables, which -// should be avoided for new global tables. Customers should use Global Tables -// version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html) -// when possible, because it provides greater flexibility, higher efficiency, -// and consumes less write capacity than 2017.11.29 (Legacy). -// -// To determine which version you're using, see Determining the global table -// version you are using (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html). 
-// To update existing global tables from version 2017.11.29 (Legacy) to version -// 2019.11.21 (Current), see Upgrading global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation ListGlobalTables for usage and error information. -// -// Returned Error Types: -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTables -func (c *DynamoDB) ListGlobalTables(input *ListGlobalTablesInput) (*ListGlobalTablesOutput, error) { - req, out := c.ListGlobalTablesRequest(input) - return out, req.Send() -} - -// ListGlobalTablesWithContext is the same as ListGlobalTables with the addition of -// the ability to pass a context and additional request options. -// -// See ListGlobalTables for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) ListGlobalTablesWithContext(ctx aws.Context, input *ListGlobalTablesInput, opts ...request.Option) (*ListGlobalTablesOutput, error) { - req, out := c.ListGlobalTablesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListImports = "ListImports" - -// ListImportsRequest generates a "aws/request.Request" representing the -// client's request for the ListImports operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListImports for more information on using the ListImports -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListImportsRequest method. -// req, resp := client.ListImportsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListImports -func (c *DynamoDB) ListImportsRequest(input *ListImportsInput) (req *request.Request, output *ListImportsOutput) { - op := &request.Operation{ - Name: opListImports, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "PageSize", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListImportsInput{} - } - - output = &ListImportsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListImports API operation for Amazon DynamoDB. -// -// Lists completed imports within the past 90 days. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
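
The legacy global-tables listing being removed here took an optional Region filter; a minimal sketch under the same environment assumptions (the region name is illustrative):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/dynamodb"
    )

    func main() {
        svc := dynamodb.New(session.Must(session.NewSession()))

        // List legacy (2017.11.29) global tables with a replica in us-east-1.
        out, err := svc.ListGlobalTables(&dynamodb.ListGlobalTablesInput{
            RegionName: aws.String("us-east-1"),
        })
        if err != nil {
            fmt.Println("ListGlobalTables failed:", err)
            return
        }
        for _, gt := range out.GlobalTables {
            fmt.Println(aws.StringValue(gt.GlobalTableName))
        }
    }
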
-// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation ListImports for usage and error information. -// -// Returned Error Types: -// -// - LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. -// -// For most purposes, up to 500 simultaneous table operations are allowed per -// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, -// RestoreTableFromBackup, and RestoreTableToPointInTime. -// -// When you are creating a table with one or more secondary indexes, you can -// have up to 250 such requests running at a time. However, if the table or -// index specifications are complex, then DynamoDB might temporarily reduce -// the number of concurrent operations. -// -// When importing into DynamoDB, up to 50 simultaneous import table operations -// are allowed per account. -// -// There is a soft account quota of 2,500 tables. -// -// GetRecords was called with a value of more than 1000 for the limit request -// parameter. -// -// More than 2 processes are reading from the same streams shard at the same -// time. Exceeding this limit may result in request throttling. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListImports -func (c *DynamoDB) ListImports(input *ListImportsInput) (*ListImportsOutput, error) { - req, out := c.ListImportsRequest(input) - return out, req.Send() -} - -// ListImportsWithContext is the same as ListImports with the addition of -// the ability to pass a context and additional request options. -// -// See ListImports for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) ListImportsWithContext(ctx aws.Context, input *ListImportsInput, opts ...request.Option) (*ListImportsOutput, error) { - req, out := c.ListImportsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListImportsPages iterates over the pages of a ListImports operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListImports method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListImports operation. -// pageNum := 0 -// err := client.ListImportsPages(params, -// func(page *dynamodb.ListImportsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *DynamoDB) ListImportsPages(input *ListImportsInput, fn func(*ListImportsOutput, bool) bool) error { - return c.ListImportsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListImportsPagesWithContext same as ListImportsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *DynamoDB) ListImportsPagesWithContext(ctx aws.Context, input *ListImportsInput, fn func(*ListImportsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListImportsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListImportsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListImportsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListTables = "ListTables" - -// ListTablesRequest generates a "aws/request.Request" representing the -// client's request for the ListTables operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListTables for more information on using the ListTables -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListTablesRequest method. -// req, resp := client.ListTablesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTables -func (c *DynamoDB) ListTablesRequest(input *ListTablesInput) (req *request.Request, output *ListTablesOutput) { - op := &request.Operation{ - Name: opListTables, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"ExclusiveStartTableName"}, - OutputTokens: []string{"LastEvaluatedTableName"}, - LimitToken: "Limit", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListTablesInput{} - } - - output = &ListTablesOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// ListTables API operation for Amazon DynamoDB. -// -// Returns an array of table names associated with the current account and endpoint. -// The output from ListTables is paginated, with each page returning a maximum -// of 100 table names. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation ListTables for usage and error information. -// -// Returned Error Types: -// - InternalServerError -// An error occurred on the server side. 
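
The ListTables paginator above pages through ExclusiveStartTableName / LastEvaluatedTableName at up to 100 names per call; ListTablesPages hides that loop. A minimal sketch (environment-provided credentials assumed):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/dynamodb"
    )

    func main() {
        svc := dynamodb.New(session.Must(session.NewSession()))

        // Accumulate all table names; the SDK re-issues the request with
        // ExclusiveStartTableName until LastEvaluatedTableName is absent.
        var names []string
        err := svc.ListTablesPages(&dynamodb.ListTablesInput{},
            func(page *dynamodb.ListTablesOutput, lastPage bool) bool {
                for _, n := range page.TableNames {
                    names = append(names, aws.StringValue(n))
                }
                return true
            })
        if err != nil {
            fmt.Println("ListTables failed:", err)
            return
        }
        fmt.Println(len(names), "tables")
    }
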
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTables -func (c *DynamoDB) ListTables(input *ListTablesInput) (*ListTablesOutput, error) { - req, out := c.ListTablesRequest(input) - return out, req.Send() -} - -// ListTablesWithContext is the same as ListTables with the addition of -// the ability to pass a context and additional request options. -// -// See ListTables for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) ListTablesWithContext(ctx aws.Context, input *ListTablesInput, opts ...request.Option) (*ListTablesOutput, error) { - req, out := c.ListTablesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListTablesPages iterates over the pages of a ListTables operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListTables method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListTables operation. -// pageNum := 0 -// err := client.ListTablesPages(params, -// func(page *dynamodb.ListTablesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *DynamoDB) ListTablesPages(input *ListTablesInput, fn func(*ListTablesOutput, bool) bool) error { - return c.ListTablesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListTablesPagesWithContext same as ListTablesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) ListTablesPagesWithContext(ctx aws.Context, input *ListTablesInput, fn func(*ListTablesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListTablesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListTablesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListTablesOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListTagsOfResource = "ListTagsOfResource" - -// ListTagsOfResourceRequest generates a "aws/request.Request" representing the -// client's request for the ListTagsOfResource operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListTagsOfResource for more information on using the ListTagsOfResource -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// // Example sending a request using the ListTagsOfResourceRequest method. -// req, resp := client.ListTagsOfResourceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTagsOfResource -func (c *DynamoDB) ListTagsOfResourceRequest(input *ListTagsOfResourceInput) (req *request.Request, output *ListTagsOfResourceOutput) { - op := &request.Operation{ - Name: opListTagsOfResource, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ListTagsOfResourceInput{} - } - - output = &ListTagsOfResourceOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// ListTagsOfResource API operation for Amazon DynamoDB. -// -// List all tags on an Amazon DynamoDB resource. You can call ListTagsOfResource -// up to 10 times per second, per account. -// -// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) -// in the Amazon DynamoDB Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation ListTagsOfResource for usage and error information. -// -// Returned Error Types: -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTagsOfResource -func (c *DynamoDB) ListTagsOfResource(input *ListTagsOfResourceInput) (*ListTagsOfResourceOutput, error) { - req, out := c.ListTagsOfResourceRequest(input) - return out, req.Send() -} - -// ListTagsOfResourceWithContext is the same as ListTagsOfResource with the addition of -// the ability to pass a context and additional request options. -// -// See ListTagsOfResource for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) ListTagsOfResourceWithContext(ctx aws.Context, input *ListTagsOfResourceInput, opts ...request.Option) (*ListTagsOfResourceOutput, error) { - req, out := c.ListTagsOfResourceRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opPutItem = "PutItem" - -// PutItemRequest generates a "aws/request.Request" representing the -// client's request for the PutItem operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutItem for more information on using the PutItem -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the PutItemRequest method. -// req, resp := client.PutItemRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PutItem -func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, output *PutItemOutput) { - op := &request.Operation{ - Name: opPutItem, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &PutItemInput{} - } - - output = &PutItemOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// PutItem API operation for Amazon DynamoDB. -// -// Creates a new item, or replaces an old item with a new item. If an item that -// has the same primary key as the new item already exists in the specified -// table, the new item completely replaces the existing item. You can perform -// a conditional put operation (add a new item if one with the specified primary -// key doesn't exist), or replace an existing item if it has certain attribute -// values. You can return the item's attribute values in the same operation, -// using the ReturnValues parameter. -// -// When you add an item, the primary key attributes are the only required attributes. -// -// Empty String and Binary attribute values are allowed. Attribute values of -// type String and Binary must have a length greater than zero if the attribute -// is used as a key attribute for a table or index. Set type attributes cannot -// be empty. -// -// Invalid Requests with empty values will be rejected with a ValidationException -// exception. -// -// To prevent a new item from replacing an existing item, use a conditional -// expression that contains the attribute_not_exists function with the name -// of the attribute being used as the partition key for the table. Since every -// record must contain that attribute, the attribute_not_exists function will -// only succeed if no matching item exists. 
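
A minimal sketch of the conditional-put pattern just described, using attribute_not_exists on the partition key (table and attribute names are illustrative):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/dynamodb"
    )

    func main() {
        svc := dynamodb.New(session.Must(session.NewSession()))

        // Insert only if no item with this partition key exists yet.
        _, err := svc.PutItem(&dynamodb.PutItemInput{
            TableName: aws.String("Music"),
            Item: map[string]*dynamodb.AttributeValue{
                "Artist":    {S: aws.String("No One You Know")},
                "SongTitle": {S: aws.String("Call Me Today")},
            },
            ConditionExpression: aws.String("attribute_not_exists(Artist)"),
        })
        if aerr, ok := err.(awserr.Error); ok &&
            aerr.Code() == dynamodb.ErrCodeConditionalCheckFailedException {
            fmt.Println("item already exists, not replaced")
            return
        }
        if err != nil {
            fmt.Println("PutItem failed:", err)
        }
    }
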
-// -// For more information about PutItem, see Working with Items (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html) -// in the Amazon DynamoDB Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation PutItem for usage and error information. -// -// Returned Error Types: -// -// - ConditionalCheckFailedException -// A condition specified in the operation could not be evaluated. -// -// - ProvisionedThroughputExceededException -// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB -// automatically retry requests that receive this exception. Your request is -// eventually successful, unless your retry queue is too large to finish. Reduce -// the frequency of requests and use exponential backoff. For more information, -// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) -// in the Amazon DynamoDB Developer Guide. -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// - ItemCollectionSizeLimitExceededException -// An item collection is too large. This exception is only returned for tables -// that have one or more local secondary indexes. -// -// - TransactionConflictException -// Operation was rejected because there is an ongoing transaction for the item. -// -// - RequestLimitExceeded -// Throughput exceeds the current throughput quota for your account. Please -// contact Amazon Web Services Support (https://aws.amazon.com/support) to request -// a quota increase. -// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PutItem -func (c *DynamoDB) PutItem(input *PutItemInput) (*PutItemOutput, error) { - req, out := c.PutItemRequest(input) - return out, req.Send() -} - -// PutItemWithContext is the same as PutItem with the addition of -// the ability to pass a context and additional request options. -// -// See PutItem for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) PutItemWithContext(ctx aws.Context, input *PutItemInput, opts ...request.Option) (*PutItemOutput, error) { - req, out := c.PutItemRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutResourcePolicy = "PutResourcePolicy" - -// PutResourcePolicyRequest generates a "aws/request.Request" representing the -// client's request for the PutResourcePolicy operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutResourcePolicy for more information on using the PutResourcePolicy -// API call, and error handling. 
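
The "Returns awserr.Error ... use runtime type assertions" boilerplate that recurs in these doc comments refers to the pattern below; a sketch, with illustrative handling per error code:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/service/dynamodb"
    )

    // classify inspects an SDK error via the awserr.Error interface.
    func classify(err error) {
        aerr, ok := err.(awserr.Error)
        if !ok {
            fmt.Println("not an SDK error:", err)
            return
        }
        switch aerr.Code() {
        case dynamodb.ErrCodeProvisionedThroughputExceededException:
            fmt.Println("throttled; SDK retries, consider backing off:", aerr.Message())
        case dynamodb.ErrCodeResourceNotFoundException:
            fmt.Println("table or index missing:", aerr.Message())
        case dynamodb.ErrCodeInternalServerError:
            fmt.Println("server-side error:", aerr.Message())
        default:
            fmt.Println(aerr.Code(), aerr.Message())
        }
    }

    func main() {
        // Synthesize an error for demonstration purposes.
        classify(awserr.New(dynamodb.ErrCodeResourceNotFoundException, "demo", nil))
    }
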
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the PutResourcePolicyRequest method. -// req, resp := client.PutResourcePolicyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PutResourcePolicy -func (c *DynamoDB) PutResourcePolicyRequest(input *PutResourcePolicyInput) (req *request.Request, output *PutResourcePolicyOutput) { - op := &request.Operation{ - Name: opPutResourcePolicy, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &PutResourcePolicyInput{} - } - - output = &PutResourcePolicyOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// PutResourcePolicy API operation for Amazon DynamoDB. -// -// Attaches a resource-based policy document to the resource, which can be a -// table or stream. When you attach a resource-based policy using this API, -// the policy application is eventually consistent (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html). -// -// PutResourcePolicy is an idempotent operation; running it multiple times on -// the same resource using the same policy document will return the same revision -// ID. If you specify an ExpectedRevisionId that doesn't match the current policy's -// RevisionId, the PolicyNotFoundException will be returned. -// -// PutResourcePolicy is an asynchronous operation. If you issue a GetResourcePolicy -// request immediately after a PutResourcePolicy request, DynamoDB might return -// your previous policy, if there was one, or return the PolicyNotFoundException. -// This is because GetResourcePolicy uses an eventually consistent query, and -// the metadata for your policy or table might not be available at that moment. -// Wait for a few seconds, and then try the GetResourcePolicy request again. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation PutResourcePolicy for usage and error information. -// -// Returned Error Types: -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// - InternalServerError -// An error occurred on the server side. -// -// - LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. -// -// For most purposes, up to 500 simultaneous table operations are allowed per -// account. 
These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, -// RestoreTableFromBackup, and RestoreTableToPointInTime. -// -// When you are creating a table with one or more secondary indexes, you can -// have up to 250 such requests running at a time. However, if the table or -// index specifications are complex, then DynamoDB might temporarily reduce -// the number of concurrent operations. -// -// When importing into DynamoDB, up to 50 simultaneous import table operations -// are allowed per account. -// -// There is a soft account quota of 2,500 tables. -// -// GetRecords was called with a value of more than 1000 for the limit request -// parameter. -// -// More than 2 processes are reading from the same streams shard at the same -// time. Exceeding this limit may result in request throttling. -// -// - PolicyNotFoundException -// The operation tried to access a nonexistent resource-based policy. -// -// If you specified an ExpectedRevisionId, it's possible that a policy is present -// for the resource but its revision ID didn't match the expected value. -// -// - ResourceInUseException -// The operation conflicts with the resource's availability. For example, you -// attempted to recreate an existing table, or tried to delete a table currently -// in the CREATING state. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PutResourcePolicy -func (c *DynamoDB) PutResourcePolicy(input *PutResourcePolicyInput) (*PutResourcePolicyOutput, error) { - req, out := c.PutResourcePolicyRequest(input) - return out, req.Send() -} - -// PutResourcePolicyWithContext is the same as PutResourcePolicy with the addition of -// the ability to pass a context and additional request options. -// -// See PutResourcePolicy for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) PutResourcePolicyWithContext(ctx aws.Context, input *PutResourcePolicyInput, opts ...request.Option) (*PutResourcePolicyOutput, error) { - req, out := c.PutResourcePolicyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opQuery = "Query" - -// QueryRequest generates a "aws/request.Request" representing the -// client's request for the Query operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See Query for more information on using the Query -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the QueryRequest method. 
-// req, resp := client.QueryRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Query -func (c *DynamoDB) QueryRequest(input *QueryInput) (req *request.Request, output *QueryOutput) { - op := &request.Operation{ - Name: opQuery, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"ExclusiveStartKey"}, - OutputTokens: []string{"LastEvaluatedKey"}, - LimitToken: "Limit", - TruncationToken: "", - }, - } - - if input == nil { - input = &QueryInput{} - } - - output = &QueryOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// Query API operation for Amazon DynamoDB. -// -// You must provide the name of the partition key attribute and a single value -// for that attribute. Query returns all items with that partition key value. -// Optionally, you can provide a sort key attribute and use a comparison operator -// to refine the search results. -// -// Use the KeyConditionExpression parameter to provide a specific value for -// the partition key. The Query operation will return all of the items from -// the table or index with that partition key value. You can optionally narrow -// the scope of the Query operation by specifying a sort key value and a comparison -// operator in KeyConditionExpression. To further refine the Query results, -// you can optionally provide a FilterExpression. A FilterExpression determines -// which items within the results should be returned to you. All of the other -// results are discarded. -// -// A Query operation always returns a result set. If no matching items are found, -// the result set will be empty. Queries that do not return results consume -// the minimum number of read capacity units for that type of read operation. -// -// DynamoDB calculates the number of read capacity units consumed based on item -// size, not on the amount of data that is returned to an application. The number -// of capacity units consumed will be the same whether you request all of the -// attributes (the default behavior) or just some of them (using a projection -// expression). The number will also be the same whether or not you use a FilterExpression. -// -// Query results are always sorted by the sort key value. If the data type of -// the sort key is Number, the results are returned in numeric order; otherwise, -// the results are returned in order of UTF-8 bytes. By default, the sort order -// is ascending. To reverse the order, set the ScanIndexForward parameter to -// false. -// -// A single Query operation will read up to the maximum number of items set -// (if using the Limit parameter) or a maximum of 1 MB of data and then apply -// any filtering to the results using FilterExpression. 
If LastEvaluatedKey -// is present in the response, you will need to paginate the result set. For -// more information, see Paginating the Results (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.html#Query.Pagination) -// in the Amazon DynamoDB Developer Guide. -// -// FilterExpression is applied after a Query finishes, but before the results -// are returned. A FilterExpression cannot contain partition key or sort key -// attributes. You need to specify those attributes in the KeyConditionExpression. -// -// A Query operation can return an empty result set and a LastEvaluatedKey if -// all the items read for the page of results are filtered out. -// -// You can query a table, a local secondary index, or a global secondary index. -// For a query on a table or on a local secondary index, you can set the ConsistentRead -// parameter to true and obtain a strongly consistent result. Global secondary -// indexes support eventually consistent reads only, so do not specify ConsistentRead -// when querying a global secondary index. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation Query for usage and error information. -// -// Returned Error Types: -// -// - ProvisionedThroughputExceededException -// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB -// automatically retry requests that receive this exception. Your request is -// eventually successful, unless your retry queue is too large to finish. Reduce -// the frequency of requests and use exponential backoff. For more information, -// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) -// in the Amazon DynamoDB Developer Guide. -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// - RequestLimitExceeded -// Throughput exceeds the current throughput quota for your account. Please -// contact Amazon Web Services Support (https://aws.amazon.com/support) to request -// a quota increase. -// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Query -func (c *DynamoDB) Query(input *QueryInput) (*QueryOutput, error) { - req, out := c.QueryRequest(input) - return out, req.Send() -} - -// QueryWithContext is the same as Query with the addition of -// the ability to pass a context and additional request options. -// -// See Query for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) QueryWithContext(ctx aws.Context, input *QueryInput, opts ...request.Option) (*QueryOutput, error) { - req, out := c.QueryRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// QueryPages iterates over the pages of a Query operation, -// calling the "fn" function with the response data for each page. 
To stop -// iterating, return false from the fn function. -// -// See Query method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a Query operation. -// pageNum := 0 -// err := client.QueryPages(params, -// func(page *dynamodb.QueryOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *DynamoDB) QueryPages(input *QueryInput, fn func(*QueryOutput, bool) bool) error { - return c.QueryPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// QueryPagesWithContext same as QueryPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) QueryPagesWithContext(ctx aws.Context, input *QueryInput, fn func(*QueryOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *QueryInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.QueryRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*QueryOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opRestoreTableFromBackup = "RestoreTableFromBackup" - -// RestoreTableFromBackupRequest generates a "aws/request.Request" representing the -// client's request for the RestoreTableFromBackup operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See RestoreTableFromBackup for more information on using the RestoreTableFromBackup -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the RestoreTableFromBackupRequest method. -// req, resp := client.RestoreTableFromBackupRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableFromBackup -func (c *DynamoDB) RestoreTableFromBackupRequest(input *RestoreTableFromBackupInput) (req *request.Request, output *RestoreTableFromBackupOutput) { - op := &request.Operation{ - Name: opRestoreTableFromBackup, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &RestoreTableFromBackupInput{} - } - - output = &RestoreTableFromBackupOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. 
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// RestoreTableFromBackup API operation for Amazon DynamoDB. -// -// Creates a new table from an existing backup. Any number of users can execute -// up to 50 concurrent restores (any type of restore) in a given account. -// -// You can call RestoreTableFromBackup at a maximum rate of 10 times per second. -// -// You must manually set up the following on the restored table: -// -// - Auto scaling policies -// -// - IAM policies -// -// - Amazon CloudWatch metrics and alarms -// -// - Tags -// -// - Stream settings -// -// - Time to Live (TTL) settings -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation RestoreTableFromBackup for usage and error information. -// -// Returned Error Types: -// -// - TableAlreadyExistsException -// A target table with the specified name already exists. -// -// - TableInUseException -// A target table with the specified name is either being created or deleted. -// -// - BackupNotFoundException -// Backup not found for the given BackupARN. -// -// - BackupInUseException -// There is another ongoing conflicting backup control plane operation on the -// table. The backup is either being created, deleted or restored to a table. -// -// - LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. -// -// For most purposes, up to 500 simultaneous table operations are allowed per -// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, -// RestoreTableFromBackup, and RestoreTableToPointInTime. -// -// When you are creating a table with one or more secondary indexes, you can -// have up to 250 such requests running at a time. However, if the table or -// index specifications are complex, then DynamoDB might temporarily reduce -// the number of concurrent operations. -// -// When importing into DynamoDB, up to 50 simultaneous import table operations -// are allowed per account. -// -// There is a soft account quota of 2,500 tables. -// -// GetRecords was called with a value of more than 1000 for the limit request -// parameter. -// -// More than 2 processes are reading from the same streams shard at the same -// time. Exceeding this limit may result in request throttling. -// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableFromBackup -func (c *DynamoDB) RestoreTableFromBackup(input *RestoreTableFromBackupInput) (*RestoreTableFromBackupOutput, error) { - req, out := c.RestoreTableFromBackupRequest(input) - return out, req.Send() -} - -// RestoreTableFromBackupWithContext is the same as RestoreTableFromBackup with the addition of -// the ability to pass a context and additional request options. 
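
A minimal sketch of invoking the restore operation removed above (the backup ARN and table name are placeholders):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/dynamodb"
    )

    func main() {
        svc := dynamodb.New(session.Must(session.NewSession()))

        // Restore a backup into a new table; auto scaling, IAM policies, tags,
        // TTL and stream settings must be re-applied manually afterwards.
        out, err := svc.RestoreTableFromBackup(&dynamodb.RestoreTableFromBackupInput{
            BackupArn:       aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/Music/backup/EXAMPLE"),
            TargetTableName: aws.String("Music-restored"),
        })
        if err != nil {
            fmt.Println("RestoreTableFromBackup failed:", err)
            return
        }
        fmt.Println("restore status:", aws.StringValue(out.TableDescription.TableStatus))
    }
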
-// -// See RestoreTableFromBackup for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) RestoreTableFromBackupWithContext(ctx aws.Context, input *RestoreTableFromBackupInput, opts ...request.Option) (*RestoreTableFromBackupOutput, error) { - req, out := c.RestoreTableFromBackupRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opRestoreTableToPointInTime = "RestoreTableToPointInTime" - -// RestoreTableToPointInTimeRequest generates a "aws/request.Request" representing the -// client's request for the RestoreTableToPointInTime operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See RestoreTableToPointInTime for more information on using the RestoreTableToPointInTime -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the RestoreTableToPointInTimeRequest method. -// req, resp := client.RestoreTableToPointInTimeRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableToPointInTime -func (c *DynamoDB) RestoreTableToPointInTimeRequest(input *RestoreTableToPointInTimeInput) (req *request.Request, output *RestoreTableToPointInTimeOutput) { - op := &request.Operation{ - Name: opRestoreTableToPointInTime, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &RestoreTableToPointInTimeInput{} - } - - output = &RestoreTableToPointInTimeOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// RestoreTableToPointInTime API operation for Amazon DynamoDB. -// -// Restores the specified table to the specified point in time within EarliestRestorableDateTime -// and LatestRestorableDateTime. You can restore your table to any point in -// time during the last 35 days. Any number of users can execute up to 50 concurrent -// restores (any type of restore) in a given account. -// -// When you restore using point in time recovery, DynamoDB restores your table -// data to the state based on the selected date and time (day:hour:minute:second) -// to a new table. 
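
The point-in-time variant follows the same shape; a sketch with placeholder names, restoring the latest restorable state (an explicit RestoreDateTime could be passed instead):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/dynamodb"
    )

    func main() {
        svc := dynamodb.New(session.Must(session.NewSession()))

        // Restore the most recent restorable state of the source table into a
        // new table; pass RestoreDateTime instead for a specific second.
        out, err := svc.RestoreTableToPointInTime(&dynamodb.RestoreTableToPointInTimeInput{
            SourceTableName:         aws.String("Music"),
            TargetTableName:         aws.String("Music-pitr"),
            UseLatestRestorableTime: aws.Bool(true),
        })
        if err != nil {
            fmt.Println("RestoreTableToPointInTime failed:", err)
            return
        }
        fmt.Println("restore status:", aws.StringValue(out.TableDescription.TableStatus))
    }
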
-// -// Along with data, the following are also included on the new restored table -// using point in time recovery: -// -// - Global secondary indexes (GSIs) -// -// - Local secondary indexes (LSIs) -// -// - Provisioned read and write capacity -// -// - Encryption settings All these settings come from the current settings -// of the source table at the time of restore. -// -// You must manually set up the following on the restored table: -// -// - Auto scaling policies -// -// - IAM policies -// -// - Amazon CloudWatch metrics and alarms -// -// - Tags -// -// - Stream settings -// -// - Time to Live (TTL) settings -// -// - Point in time recovery settings -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation RestoreTableToPointInTime for usage and error information. -// -// Returned Error Types: -// -// - TableAlreadyExistsException -// A target table with the specified name already exists. -// -// - TableNotFoundException -// A source table with the name TableName does not currently exist within the -// subscriber's account or the subscriber is operating in the wrong Amazon Web -// Services Region. -// -// - TableInUseException -// A target table with the specified name is either being created or deleted. -// -// - LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. -// -// For most purposes, up to 500 simultaneous table operations are allowed per -// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, -// RestoreTableFromBackup, and RestoreTableToPointInTime. -// -// When you are creating a table with one or more secondary indexes, you can -// have up to 250 such requests running at a time. However, if the table or -// index specifications are complex, then DynamoDB might temporarily reduce -// the number of concurrent operations. -// -// When importing into DynamoDB, up to 50 simultaneous import table operations -// are allowed per account. -// -// There is a soft account quota of 2,500 tables. -// -// GetRecords was called with a value of more than 1000 for the limit request -// parameter. -// -// More than 2 processes are reading from the same streams shard at the same -// time. Exceeding this limit may result in request throttling. -// -// - InvalidRestoreTimeException -// An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime -// and LatestRestorableDateTime. -// -// - PointInTimeRecoveryUnavailableException -// Point in time recovery has not yet been enabled for this source table. -// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableToPointInTime -func (c *DynamoDB) RestoreTableToPointInTime(input *RestoreTableToPointInTimeInput) (*RestoreTableToPointInTimeOutput, error) { - req, out := c.RestoreTableToPointInTimeRequest(input) - return out, req.Send() -} - -// RestoreTableToPointInTimeWithContext is the same as RestoreTableToPointInTime with the addition of -// the ability to pass a context and additional request options. -// -// See RestoreTableToPointInTime for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. 
In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) RestoreTableToPointInTimeWithContext(ctx aws.Context, input *RestoreTableToPointInTimeInput, opts ...request.Option) (*RestoreTableToPointInTimeOutput, error) { - req, out := c.RestoreTableToPointInTimeRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opScan = "Scan" - -// ScanRequest generates a "aws/request.Request" representing the -// client's request for the Scan operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See Scan for more information on using the Scan -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ScanRequest method. -// req, resp := client.ScanRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Scan -func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output *ScanOutput) { - op := &request.Operation{ - Name: opScan, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"ExclusiveStartKey"}, - OutputTokens: []string{"LastEvaluatedKey"}, - LimitToken: "Limit", - TruncationToken: "", - }, - } - - if input == nil { - input = &ScanInput{} - } - - output = &ScanOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// Scan API operation for Amazon DynamoDB. -// -// The Scan operation returns one or more items and item attributes by accessing -// every item in a table or a secondary index. To have DynamoDB return fewer -// items, you can provide a FilterExpression operation. -// -// If the total size of scanned items exceeds the maximum dataset size limit -// of 1 MB, the scan completes and results are returned to the user. The LastEvaluatedKey -// value is also returned and the requestor can use the LastEvaluatedKey to -// continue the scan in a subsequent operation. Each scan response also includes -// number of items that were scanned (ScannedCount) as part of the request. -// If using a FilterExpression, a scan result can result in no items meeting -// the criteria and the Count will result in zero. If you did not use a FilterExpression -// in the scan request, then Count is the same as ScannedCount. 
-// -// Count and ScannedCount only return the count of items specific to a single -// scan request and, unless the table is less than 1MB, do not represent the -// total number of items in the table. -// -// A single Scan operation first reads up to the maximum number of items set -// (if using the Limit parameter) or a maximum of 1 MB of data and then applies -// any filtering to the results if a FilterExpression is provided. If LastEvaluatedKey -// is present in the response, pagination is required to complete the full table -// scan. For more information, see Paginating the Results (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Pagination) -// in the Amazon DynamoDB Developer Guide. -// -// Scan operations proceed sequentially; however, for faster performance on -// a large table or secondary index, applications can request a parallel Scan -// operation by providing the Segment and TotalSegments parameters. For more -// information, see Parallel Scan (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.ParallelScan) -// in the Amazon DynamoDB Developer Guide. -// -// By default, a Scan uses eventually consistent reads when accessing the items -// in a table. Therefore, the results from an eventually consistent Scan may -// not include the latest item changes at the time the scan iterates through -// each item in the table. If you require a strongly consistent read of each -// item as the scan iterates through the items in the table, you can set the -// ConsistentRead parameter to true. Strong consistency only relates to the -// consistency of the read at the item level. -// -// DynamoDB does not provide snapshot isolation for a scan operation when the -// ConsistentRead parameter is set to true. Thus, a DynamoDB scan operation -// does not guarantee that all reads in a scan see a consistent snapshot of -// the table when the scan operation was requested. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation Scan for usage and error information. -// -// Returned Error Types: -// -// - ProvisionedThroughputExceededException -// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB -// automatically retry requests that receive this exception. Your request is -// eventually successful, unless your retry queue is too large to finish. Reduce -// the frequency of requests and use exponential backoff. For more information, -// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) -// in the Amazon DynamoDB Developer Guide. -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// - RequestLimitExceeded -// Throughput exceeds the current throughput quota for your account. Please -// contact Amazon Web Services Support (https://aws.amazon.com/support) to request -// a quota increase. -// -// - InternalServerError -// An error occurred on the server side. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Scan -func (c *DynamoDB) Scan(input *ScanInput) (*ScanOutput, error) { - req, out := c.ScanRequest(input) - return out, req.Send() -} - -// ScanWithContext is the same as Scan with the addition of -// the ability to pass a context and additional request options. -// -// See Scan for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) ScanWithContext(ctx aws.Context, input *ScanInput, opts ...request.Option) (*ScanOutput, error) { - req, out := c.ScanRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ScanPages iterates over the pages of a Scan operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See Scan method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a Scan operation. -// pageNum := 0 -// err := client.ScanPages(params, -// func(page *dynamodb.ScanOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *DynamoDB) ScanPages(input *ScanInput, fn func(*ScanOutput, bool) bool) error { - return c.ScanPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ScanPagesWithContext same as ScanPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) ScanPagesWithContext(ctx aws.Context, input *ScanInput, fn func(*ScanOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ScanInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ScanRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ScanOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opTagResource = "TagResource" - -// TagResourceRequest generates a "aws/request.Request" representing the -// client's request for the TagResource operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See TagResource for more information on using the TagResource -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the TagResourceRequest method. 
-// req, resp := client.TagResourceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TagResource -func (c *DynamoDB) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { - op := &request.Operation{ - Name: opTagResource, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &TagResourceInput{} - } - - output = &TagResourceOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// TagResource API operation for Amazon DynamoDB. -// -// Associate a set of tags with an Amazon DynamoDB resource. You can then activate -// these user-defined tags so that they appear on the Billing and Cost Management -// console for cost allocation tracking. You can call TagResource up to five -// times per second, per account. -// -// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) -// in the Amazon DynamoDB Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation TagResource for usage and error information. -// -// Returned Error Types: -// -// - LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. -// -// For most purposes, up to 500 simultaneous table operations are allowed per -// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, -// RestoreTableFromBackup, and RestoreTableToPointInTime. -// -// When you are creating a table with one or more secondary indexes, you can -// have up to 250 such requests running at a time. However, if the table or -// index specifications are complex, then DynamoDB might temporarily reduce -// the number of concurrent operations. -// -// When importing into DynamoDB, up to 50 simultaneous import table operations -// are allowed per account. -// -// There is a soft account quota of 2,500 tables. -// -// GetRecords was called with a value of more than 1000 for the limit request -// parameter. -// -// More than 2 processes are reading from the same streams shard at the same -// time. Exceeding this limit may result in request throttling. -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// - InternalServerError -// An error occurred on the server side. 
-// -// - ResourceInUseException -// The operation conflicts with the resource's availability. For example, you -// attempted to recreate an existing table, or tried to delete a table currently -// in the CREATING state. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TagResource -func (c *DynamoDB) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) - return out, req.Send() -} - -// TagResourceWithContext is the same as TagResource with the addition of -// the ability to pass a context and additional request options. -// -// See TagResource for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opTransactGetItems = "TransactGetItems" - -// TransactGetItemsRequest generates a "aws/request.Request" representing the -// client's request for the TransactGetItems operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See TransactGetItems for more information on using the TransactGetItems -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the TransactGetItemsRequest method. -// req, resp := client.TransactGetItemsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactGetItems -func (c *DynamoDB) TransactGetItemsRequest(input *TransactGetItemsInput) (req *request.Request, output *TransactGetItemsOutput) { - op := &request.Operation{ - Name: opTransactGetItems, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &TransactGetItemsInput{} - } - - output = &TransactGetItemsOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// TransactGetItems API operation for Amazon DynamoDB. -// -// TransactGetItems is a synchronous operation that atomically retrieves multiple -// items from one or more tables (but not from indexes) in a single account -// and Region. 
A TransactGetItems call can contain up to 100 TransactGetItem -// objects, each of which contains a Get structure that specifies an item to -// retrieve from a table in the account and Region. A call to TransactGetItems -// cannot retrieve items from tables in more than one Amazon Web Services account -// or Region. The aggregate size of the items in the transaction cannot exceed -// 4 MB. -// -// DynamoDB rejects the entire TransactGetItems request if any of the following -// is true: -// -// - A conflicting operation is in the process of updating an item to be -// read. -// -// - There is insufficient provisioned capacity for the transaction to be -// completed. -// -// - There is a user error, such as an invalid data format. -// -// - The aggregate size of the items in the transaction exceeded 4 MB. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation TransactGetItems for usage and error information. -// -// Returned Error Types: -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// - TransactionCanceledException -// The entire transaction request was canceled. -// -// DynamoDB cancels a TransactWriteItems request under the following circumstances: -// -// - A condition in one of the condition expressions is not met. -// -// - A table in the TransactWriteItems request is in a different account -// or region. -// -// - More than one action in the TransactWriteItems operation targets the -// same item. -// -// - There is insufficient provisioned capacity for the transaction to be -// completed. -// -// - An item size becomes too large (larger than 400 KB), or a local secondary -// index (LSI) becomes too large, or a similar validation error occurs because -// of changes made by the transaction. -// -// - There is a user error, such as an invalid data format. -// -// - There is an ongoing TransactWriteItems operation that conflicts with -// a concurrent TransactWriteItems request. In this case the TransactWriteItems -// operation fails with a TransactionCanceledException. -// -// DynamoDB cancels a TransactGetItems request under the following circumstances: -// -// - There is an ongoing TransactGetItems operation that conflicts with a -// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request. -// In this case the TransactGetItems operation fails with a TransactionCanceledException. -// -// - A table in the TransactGetItems request is in a different account or -// region. -// -// - There is insufficient provisioned capacity for the transaction to be -// completed. -// -// - There is a user error, such as an invalid data format. -// -// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons -// property. This property is not set for other languages. Transaction cancellation -// reasons are ordered in the order of requested items, if an item has no error -// it will have None code and Null message. -// -// Cancellation reason codes and possible error messages: -// -// - No Errors: Code: None Message: null -// -// - Conditional Check Failed: Code: ConditionalCheckFailed Message: The -// conditional request failed. 
-// -// - Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded -// Message: Collection size exceeded. -// -// - Transaction Conflict: Code: TransactionConflict Message: Transaction -// is ongoing for the item. -// -// - Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded -// Messages: The level of configured provisioned throughput for the table -// was exceeded. Consider increasing your provisioning level with the UpdateTable -// API. This Message is received when provisioned throughput is exceeded -// is on a provisioned DynamoDB table. The level of configured provisioned -// throughput for one or more global secondary indexes of the table was exceeded. -// Consider increasing your provisioning level for the under-provisioned -// global secondary indexes with the UpdateTable API. This message is returned -// when provisioned throughput is exceeded is on a provisioned GSI. -// -// - Throttling Error: Code: ThrottlingError Messages: Throughput exceeds -// the current capacity of your table or index. DynamoDB is automatically -// scaling your table or index so please try again shortly. If exceptions -// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html. -// This message is returned when writes get throttled on an On-Demand table -// as DynamoDB is automatically scaling the table. Throughput exceeds the -// current capacity for one or more global secondary indexes. DynamoDB is -// automatically scaling your index so please try again shortly. This message -// is returned when writes get throttled on an On-Demand GSI as DynamoDB -// is automatically scaling the GSI. -// -// - Validation Error: Code: ValidationError Messages: One or more parameter -// values were invalid. The update expression attempted to update the secondary -// index key beyond allowed size limits. The update expression attempted -// to update the secondary index key to unsupported type. An operand in the -// update expression has an incorrect data type. Item size to update has -// exceeded the maximum allowed size. Number overflow. Attempting to store -// a number with magnitude larger than supported range. Type mismatch for -// attribute to update. Nesting Levels have exceeded supported limits. The -// document path provided in the update expression is invalid for update. -// The provided expression refers to an attribute that does not exist in -// the item. -// -// - ProvisionedThroughputExceededException -// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB -// automatically retry requests that receive this exception. Your request is -// eventually successful, unless your retry queue is too large to finish. Reduce -// the frequency of requests and use exponential backoff. For more information, -// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) -// in the Amazon DynamoDB Developer Guide. -// -// - RequestLimitExceeded -// Throughput exceeds the current throughput quota for your account. Please -// contact Amazon Web Services Support (https://aws.amazon.com/support) to request -// a quota increase. -// -// - InternalServerError -// An error occurred on the server side. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactGetItems -func (c *DynamoDB) TransactGetItems(input *TransactGetItemsInput) (*TransactGetItemsOutput, error) { - req, out := c.TransactGetItemsRequest(input) - return out, req.Send() -} - -// TransactGetItemsWithContext is the same as TransactGetItems with the addition of -// the ability to pass a context and additional request options. -// -// See TransactGetItems for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) TransactGetItemsWithContext(ctx aws.Context, input *TransactGetItemsInput, opts ...request.Option) (*TransactGetItemsOutput, error) { - req, out := c.TransactGetItemsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opTransactWriteItems = "TransactWriteItems" - -// TransactWriteItemsRequest generates a "aws/request.Request" representing the -// client's request for the TransactWriteItems operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See TransactWriteItems for more information on using the TransactWriteItems -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the TransactWriteItemsRequest method. -// req, resp := client.TransactWriteItemsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactWriteItems -func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (req *request.Request, output *TransactWriteItemsOutput) { - op := &request.Operation{ - Name: opTransactWriteItems, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &TransactWriteItemsInput{} - } - - output = &TransactWriteItemsOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// TransactWriteItems API operation for Amazon DynamoDB. -// -// TransactWriteItems is a synchronous write operation that groups up to 100 -// action requests. These actions can target items in different tables, but -// not in different Amazon Web Services accounts or Regions, and no two actions -// can target the same item. 
For example, you cannot both ConditionCheck and -// Update the same item. The aggregate size of the items in the transaction -// cannot exceed 4 MB. -// -// The actions are completed atomically so that either all of them succeed, -// or all of them fail. They are defined by the following objects: -// -// - Put — Initiates a PutItem operation to write a new item. This structure -// specifies the primary key of the item to be written, the name of the table -// to write it in, an optional condition expression that must be satisfied -// for the write to succeed, a list of the item's attributes, and a field -// indicating whether to retrieve the item's attributes if the condition -// is not met. -// -// - Update — Initiates an UpdateItem operation to update an existing item. -// This structure specifies the primary key of the item to be updated, the -// name of the table where it resides, an optional condition expression that -// must be satisfied for the update to succeed, an expression that defines -// one or more attributes to be updated, and a field indicating whether to -// retrieve the item's attributes if the condition is not met. -// -// - Delete — Initiates a DeleteItem operation to delete an existing item. -// This structure specifies the primary key of the item to be deleted, the -// name of the table where it resides, an optional condition expression that -// must be satisfied for the deletion to succeed, and a field indicating -// whether to retrieve the item's attributes if the condition is not met. -// -// - ConditionCheck — Applies a condition to an item that is not being -// modified by the transaction. This structure specifies the primary key -// of the item to be checked, the name of the table where it resides, a condition -// expression that must be satisfied for the transaction to succeed, and -// a field indicating whether to retrieve the item's attributes if the condition -// is not met. -// -// DynamoDB rejects the entire TransactWriteItems request if any of the following -// is true: -// -// - A condition in one of the condition expressions is not met. -// -// - An ongoing operation is in the process of updating the same item. -// -// - There is insufficient provisioned capacity for the transaction to be -// completed. -// -// - An item size becomes too large (bigger than 400 KB), a local secondary -// index (LSI) becomes too large, or a similar validation error occurs because -// of changes made by the transaction. -// -// - The aggregate size of the items in the transaction exceeds 4 MB. -// -// - There is a user error, such as an invalid data format. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation TransactWriteItems for usage and error information. -// -// Returned Error Types: -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// - TransactionCanceledException -// The entire transaction request was canceled. -// -// DynamoDB cancels a TransactWriteItems request under the following circumstances: -// -// - A condition in one of the condition expressions is not met. -// -// - A table in the TransactWriteItems request is in a different account -// or region. 
-// -// - More than one action in the TransactWriteItems operation targets the -// same item. -// -// - There is insufficient provisioned capacity for the transaction to be -// completed. -// -// - An item size becomes too large (larger than 400 KB), or a local secondary -// index (LSI) becomes too large, or a similar validation error occurs because -// of changes made by the transaction. -// -// - There is a user error, such as an invalid data format. -// -// - There is an ongoing TransactWriteItems operation that conflicts with -// a concurrent TransactWriteItems request. In this case the TransactWriteItems -// operation fails with a TransactionCanceledException. -// -// DynamoDB cancels a TransactGetItems request under the following circumstances: -// -// - There is an ongoing TransactGetItems operation that conflicts with a -// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request. -// In this case the TransactGetItems operation fails with a TransactionCanceledException. -// -// - A table in the TransactGetItems request is in a different account or -// region. -// -// - There is insufficient provisioned capacity for the transaction to be -// completed. -// -// - There is a user error, such as an invalid data format. -// -// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons -// property. This property is not set for other languages. Transaction cancellation -// reasons are ordered in the order of requested items, if an item has no error -// it will have None code and Null message. -// -// Cancellation reason codes and possible error messages: -// -// - No Errors: Code: None Message: null -// -// - Conditional Check Failed: Code: ConditionalCheckFailed Message: The -// conditional request failed. -// -// - Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded -// Message: Collection size exceeded. -// -// - Transaction Conflict: Code: TransactionConflict Message: Transaction -// is ongoing for the item. -// -// - Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded -// Messages: The level of configured provisioned throughput for the table -// was exceeded. Consider increasing your provisioning level with the UpdateTable -// API. This Message is received when provisioned throughput is exceeded -// is on a provisioned DynamoDB table. The level of configured provisioned -// throughput for one or more global secondary indexes of the table was exceeded. -// Consider increasing your provisioning level for the under-provisioned -// global secondary indexes with the UpdateTable API. This message is returned -// when provisioned throughput is exceeded is on a provisioned GSI. -// -// - Throttling Error: Code: ThrottlingError Messages: Throughput exceeds -// the current capacity of your table or index. DynamoDB is automatically -// scaling your table or index so please try again shortly. If exceptions -// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html. -// This message is returned when writes get throttled on an On-Demand table -// as DynamoDB is automatically scaling the table. Throughput exceeds the -// current capacity for one or more global secondary indexes. DynamoDB is -// automatically scaling your index so please try again shortly. This message -// is returned when writes get throttled on an On-Demand GSI as DynamoDB -// is automatically scaling the GSI. 
-// -// - Validation Error: Code: ValidationError Messages: One or more parameter -// values were invalid. The update expression attempted to update the secondary -// index key beyond allowed size limits. The update expression attempted -// to update the secondary index key to unsupported type. An operand in the -// update expression has an incorrect data type. Item size to update has -// exceeded the maximum allowed size. Number overflow. Attempting to store -// a number with magnitude larger than supported range. Type mismatch for -// attribute to update. Nesting Levels have exceeded supported limits. The -// document path provided in the update expression is invalid for update. -// The provided expression refers to an attribute that does not exist in -// the item. -// -// - TransactionInProgressException -// The transaction with the given request token is already in progress. -// -// Recommended Settings -// -// This is a general recommendation for handling the TransactionInProgressException. -// These settings help ensure that the client retries will trigger completion -// of the ongoing TransactWriteItems request. -// -// - Set clientExecutionTimeout to a value that allows at least one retry -// to be processed after 5 seconds have elapsed since the first attempt for -// the TransactWriteItems operation. -// -// - Set socketTimeout to a value a little lower than the requestTimeout -// setting. -// -// - requestTimeout should be set based on the time taken for the individual -// retries of a single HTTP request for your use case, but setting it to -// 1 second or higher should work well to reduce chances of retries and TransactionInProgressException -// errors. -// -// - Use exponential backoff when retrying and tune backoff if needed. -// -// Assuming default retry policy (https://github.com/aws/aws-sdk-java/blob/fd409dee8ae23fb8953e0bb4dbde65536a7e0514/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L97), -// example timeout settings based on the guidelines above are as follows: -// -// Example timeline: -// -// - 0-1000 first attempt -// -// - 1000-1500 first sleep/delay (default retry policy uses 500 ms as base -// delay for 4xx errors) -// -// - 1500-2500 second attempt -// -// - 2500-3500 second sleep/delay (500 * 2, exponential backoff) -// -// - 3500-4500 third attempt -// -// - 4500-6500 third sleep/delay (500 * 2^2) -// -// - 6500-7500 fourth attempt (this can trigger inline recovery since 5 seconds -// have elapsed since the first attempt reached TC) -// -// - IdempotentParameterMismatchException -// DynamoDB rejected the request because you retried a request with a different -// payload but with an idempotent token that was already used. -// -// - ProvisionedThroughputExceededException -// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB -// automatically retry requests that receive this exception. Your request is -// eventually successful, unless your retry queue is too large to finish. Reduce -// the frequency of requests and use exponential backoff. For more information, -// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) -// in the Amazon DynamoDB Developer Guide. -// -// - RequestLimitExceeded -// Throughput exceeds the current throughput quota for your account. Please -// contact Amazon Web Services Support (https://aws.amazon.com/support) to request -// a quota increase. 
-// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactWriteItems -func (c *DynamoDB) TransactWriteItems(input *TransactWriteItemsInput) (*TransactWriteItemsOutput, error) { - req, out := c.TransactWriteItemsRequest(input) - return out, req.Send() -} - -// TransactWriteItemsWithContext is the same as TransactWriteItems with the addition of -// the ability to pass a context and additional request options. -// -// See TransactWriteItems for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) TransactWriteItemsWithContext(ctx aws.Context, input *TransactWriteItemsInput, opts ...request.Option) (*TransactWriteItemsOutput, error) { - req, out := c.TransactWriteItemsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUntagResource = "UntagResource" - -// UntagResourceRequest generates a "aws/request.Request" representing the -// client's request for the UntagResource operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UntagResource for more information on using the UntagResource -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the UntagResourceRequest method. -// req, resp := client.UntagResourceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UntagResource -func (c *DynamoDB) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { - op := &request.Operation{ - Name: opUntagResource, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UntagResourceInput{} - } - - output = &UntagResourceOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// UntagResource API operation for Amazon DynamoDB. -// -// Removes the association of tags from an Amazon DynamoDB resource. You can -// call UntagResource up to five times per second, per account. 
-// -// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) -// in the Amazon DynamoDB Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation UntagResource for usage and error information. -// -// Returned Error Types: -// -// - LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. -// -// For most purposes, up to 500 simultaneous table operations are allowed per -// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, -// RestoreTableFromBackup, and RestoreTableToPointInTime. -// -// When you are creating a table with one or more secondary indexes, you can -// have up to 250 such requests running at a time. However, if the table or -// index specifications are complex, then DynamoDB might temporarily reduce -// the number of concurrent operations. -// -// When importing into DynamoDB, up to 50 simultaneous import table operations -// are allowed per account. -// -// There is a soft account quota of 2,500 tables. -// -// GetRecords was called with a value of more than 1000 for the limit request -// parameter. -// -// More than 2 processes are reading from the same streams shard at the same -// time. Exceeding this limit may result in request throttling. -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// - InternalServerError -// An error occurred on the server side. -// -// - ResourceInUseException -// The operation conflicts with the resource's availability. For example, you -// attempted to recreate an existing table, or tried to delete a table currently -// in the CREATING state. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UntagResource -func (c *DynamoDB) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) - return out, req.Send() -} - -// UntagResourceWithContext is the same as UntagResource with the addition of -// the ability to pass a context and additional request options. -// -// See UntagResource for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateContinuousBackups = "UpdateContinuousBackups" - -// UpdateContinuousBackupsRequest generates a "aws/request.Request" representing the -// client's request for the UpdateContinuousBackups operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See UpdateContinuousBackups for more information on using the UpdateContinuousBackups -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the UpdateContinuousBackupsRequest method. -// req, resp := client.UpdateContinuousBackupsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContinuousBackups -func (c *DynamoDB) UpdateContinuousBackupsRequest(input *UpdateContinuousBackupsInput) (req *request.Request, output *UpdateContinuousBackupsOutput) { - op := &request.Operation{ - Name: opUpdateContinuousBackups, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateContinuousBackupsInput{} - } - - output = &UpdateContinuousBackupsOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// UpdateContinuousBackups API operation for Amazon DynamoDB. -// -// UpdateContinuousBackups enables or disables point in time recovery for the -// specified table. A successful UpdateContinuousBackups call returns the current -// ContinuousBackupsDescription. Continuous backups are ENABLED on all tables -// at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus -// will be set to ENABLED. -// -// Once continuous backups and point in time recovery are enabled, you can restore -// to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime. -// -// LatestRestorableDateTime is typically 5 minutes before the current time. -// You can restore your table to any point in time during the last 35 days. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation UpdateContinuousBackups for usage and error information. -// -// Returned Error Types: -// -// - TableNotFoundException -// A source table with the name TableName does not currently exist within the -// subscriber's account or the subscriber is operating in the wrong Amazon Web -// Services Region. -// -// - ContinuousBackupsUnavailableException -// Backups have not yet been enabled for this table. -// -// - InternalServerError -// An error occurred on the server side. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContinuousBackups -func (c *DynamoDB) UpdateContinuousBackups(input *UpdateContinuousBackupsInput) (*UpdateContinuousBackupsOutput, error) { - req, out := c.UpdateContinuousBackupsRequest(input) - return out, req.Send() -} - -// UpdateContinuousBackupsWithContext is the same as UpdateContinuousBackups with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateContinuousBackups for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) UpdateContinuousBackupsWithContext(ctx aws.Context, input *UpdateContinuousBackupsInput, opts ...request.Option) (*UpdateContinuousBackupsOutput, error) { - req, out := c.UpdateContinuousBackupsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateContributorInsights = "UpdateContributorInsights" - -// UpdateContributorInsightsRequest generates a "aws/request.Request" representing the -// client's request for the UpdateContributorInsights operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateContributorInsights for more information on using the UpdateContributorInsights -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the UpdateContributorInsightsRequest method. -// req, resp := client.UpdateContributorInsightsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContributorInsights -func (c *DynamoDB) UpdateContributorInsightsRequest(input *UpdateContributorInsightsInput) (req *request.Request, output *UpdateContributorInsightsOutput) { - op := &request.Operation{ - Name: opUpdateContributorInsights, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateContributorInsightsInput{} - } - - output = &UpdateContributorInsightsOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateContributorInsights API operation for Amazon DynamoDB. -// -// Updates the status for contributor insights for a specific table or index. -// CloudWatch Contributor Insights for DynamoDB graphs display the partition -// key and (if applicable) sort key of frequently accessed items and frequently -// throttled items in plaintext. If you require the use of Amazon Web Services -// Key Management Service (KMS) to encrypt this table’s partition key and -// sort key data with an Amazon Web Services managed key or customer managed -// key, you should not enable CloudWatch Contributor Insights for DynamoDB for -// this table. -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation UpdateContributorInsights for usage and error information. -// -// Returned Error Types: -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContributorInsights -func (c *DynamoDB) UpdateContributorInsights(input *UpdateContributorInsightsInput) (*UpdateContributorInsightsOutput, error) { - req, out := c.UpdateContributorInsightsRequest(input) - return out, req.Send() -} - -// UpdateContributorInsightsWithContext is the same as UpdateContributorInsights with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateContributorInsights for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) UpdateContributorInsightsWithContext(ctx aws.Context, input *UpdateContributorInsightsInput, opts ...request.Option) (*UpdateContributorInsightsOutput, error) { - req, out := c.UpdateContributorInsightsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateGlobalTable = "UpdateGlobalTable" - -// UpdateGlobalTableRequest generates a "aws/request.Request" representing the -// client's request for the UpdateGlobalTable operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateGlobalTable for more information on using the UpdateGlobalTable -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the UpdateGlobalTableRequest method. -// req, resp := client.UpdateGlobalTableRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTable -func (c *DynamoDB) UpdateGlobalTableRequest(input *UpdateGlobalTableInput) (req *request.Request, output *UpdateGlobalTableOutput) { - op := &request.Operation{ - Name: opUpdateGlobalTable, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateGlobalTableInput{} - } - - output = &UpdateGlobalTableOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. 
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// UpdateGlobalTable API operation for Amazon DynamoDB. -// -// Adds or removes replicas in the specified global table. The global table -// must already exist to be able to use this operation. Any replica to be added -// must be empty, have the same name as the global table, have the same key -// schema, have DynamoDB Streams enabled, and have the same provisioned and -// maximum write capacity units. -// -// This documentation is for version 2017.11.29 (Legacy) of global tables, which -// should be avoided for new global tables. Customers should use Global Tables -// version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html) -// when possible, because it provides greater flexibility, higher efficiency, -// and consumes less write capacity than 2017.11.29 (Legacy). -// -// To determine which version you're using, see Determining the global table -// version you are using (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html). -// To update existing global tables from version 2017.11.29 (Legacy) to version -// 2019.11.21 (Current), see Upgrading global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html). -// -// For global tables, this operation only applies to global tables using Version -// 2019.11.21 (Current version). If you are using global tables Version 2019.11.21 -// (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html) -// you can use UpdateTable (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateTable.html) -// instead. -// -// Although you can use UpdateGlobalTable to add replicas and remove replicas -// in a single request, for simplicity we recommend that you issue separate -// requests for adding or removing replicas. -// -// If global secondary indexes are specified, then the following conditions -// must also be met: -// -// - The global secondary indexes must have the same name. -// -// - The global secondary indexes must have the same hash key and sort key -// (if present). -// -// - The global secondary indexes must have the same provisioned and maximum -// write capacity units. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation UpdateGlobalTable for usage and error information. -// -// Returned Error Types: -// -// - InternalServerError -// An error occurred on the server side. -// -// - GlobalTableNotFoundException -// The specified global table does not exist. -// -// - ReplicaAlreadyExistsException -// The specified replica is already part of the global table. -// -// - ReplicaNotFoundException -// The specified replica is no longer part of the global table. 
-// -// - TableNotFoundException -// A source table with the name TableName does not currently exist within the -// subscriber's account or the subscriber is operating in the wrong Amazon Web -// Services Region. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTable -func (c *DynamoDB) UpdateGlobalTable(input *UpdateGlobalTableInput) (*UpdateGlobalTableOutput, error) { - req, out := c.UpdateGlobalTableRequest(input) - return out, req.Send() -} - -// UpdateGlobalTableWithContext is the same as UpdateGlobalTable with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateGlobalTable for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) UpdateGlobalTableWithContext(ctx aws.Context, input *UpdateGlobalTableInput, opts ...request.Option) (*UpdateGlobalTableOutput, error) { - req, out := c.UpdateGlobalTableRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateGlobalTableSettings = "UpdateGlobalTableSettings" - -// UpdateGlobalTableSettingsRequest generates a "aws/request.Request" representing the -// client's request for the UpdateGlobalTableSettings operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateGlobalTableSettings for more information on using the UpdateGlobalTableSettings -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the UpdateGlobalTableSettingsRequest method. -// req, resp := client.UpdateGlobalTableSettingsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTableSettings -func (c *DynamoDB) UpdateGlobalTableSettingsRequest(input *UpdateGlobalTableSettingsInput) (req *request.Request, output *UpdateGlobalTableSettingsOutput) { - op := &request.Operation{ - Name: opUpdateGlobalTableSettings, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateGlobalTableSettingsInput{} - } - - output = &UpdateGlobalTableSettingsOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. 
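// For reference, adding a replica through the legacy UpdateGlobalTable
// operation above might look like the following; the table name and Region
// are placeholders:
//
//	out, err := svc.UpdateGlobalTable(&dynamodb.UpdateGlobalTableInput{
//		GlobalTableName: aws.String("Music"),
//		ReplicaUpdates: []*dynamodb.ReplicaUpdate{
//			{Create: &dynamodb.CreateReplicaAction{RegionName: aws.String("us-west-2")}},
//		},
//	})
//
// Per the note above, prefer separate requests when both adding and removing
// replicas.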
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// UpdateGlobalTableSettings API operation for Amazon DynamoDB. -// -// Updates settings for a global table. -// -// This documentation is for version 2017.11.29 (Legacy) of global tables, which -// should be avoided for new global tables. Customers should use Global Tables -// version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html) -// when possible, because it provides greater flexibility, higher efficiency, -// and consumes less write capacity than 2017.11.29 (Legacy). -// -// To determine which version you're using, see Determining the global table -// version you are using (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html). -// To update existing global tables from version 2017.11.29 (Legacy) to version -// 2019.11.21 (Current), see Upgrading global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation UpdateGlobalTableSettings for usage and error information. -// -// Returned Error Types: -// -// - GlobalTableNotFoundException -// The specified global table does not exist. -// -// - ReplicaNotFoundException -// The specified replica is no longer part of the global table. -// -// - IndexNotFoundException -// The operation tried to access a nonexistent index. -// -// - LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. -// -// For most purposes, up to 500 simultaneous table operations are allowed per -// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, -// RestoreTableFromBackup, and RestoreTableToPointInTime. -// -// When you are creating a table with one or more secondary indexes, you can -// have up to 250 such requests running at a time. However, if the table or -// index specifications are complex, then DynamoDB might temporarily reduce -// the number of concurrent operations. -// -// When importing into DynamoDB, up to 50 simultaneous import table operations -// are allowed per account. -// -// There is a soft account quota of 2,500 tables. -// -// GetRecords was called with a value of more than 1000 for the limit request -// parameter. -// -// More than 2 processes are reading from the same streams shard at the same -// time. Exceeding this limit may result in request throttling. -// -// - ResourceInUseException -// The operation conflicts with the resource's availability. For example, you -// attempted to recreate an existing table, or tried to delete a table currently -// in the CREATING state. -// -// - InternalServerError -// An error occurred on the server side. 
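// A short, assumed example of the settings change this operation performs;
// the global table name and capacity figure are placeholders:
//
//	out, err := svc.UpdateGlobalTableSettings(&dynamodb.UpdateGlobalTableSettingsInput{
//		GlobalTableName:                          aws.String("Music"),
//		GlobalTableProvisionedWriteCapacityUnits: aws.Int64(10),
//	})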
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTableSettings -func (c *DynamoDB) UpdateGlobalTableSettings(input *UpdateGlobalTableSettingsInput) (*UpdateGlobalTableSettingsOutput, error) { - req, out := c.UpdateGlobalTableSettingsRequest(input) - return out, req.Send() -} - -// UpdateGlobalTableSettingsWithContext is the same as UpdateGlobalTableSettings with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateGlobalTableSettings for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) UpdateGlobalTableSettingsWithContext(ctx aws.Context, input *UpdateGlobalTableSettingsInput, opts ...request.Option) (*UpdateGlobalTableSettingsOutput, error) { - req, out := c.UpdateGlobalTableSettingsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateItem = "UpdateItem" - -// UpdateItemRequest generates a "aws/request.Request" representing the -// client's request for the UpdateItem operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateItem for more information on using the UpdateItem -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the UpdateItemRequest method. -// req, resp := client.UpdateItemRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateItem -func (c *DynamoDB) UpdateItemRequest(input *UpdateItemInput) (req *request.Request, output *UpdateItemOutput) { - op := &request.Operation{ - Name: opUpdateItem, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateItemInput{} - } - - output = &UpdateItemOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// UpdateItem API operation for Amazon DynamoDB. -// -// Edits an existing item's attributes, or adds a new item to the table if it -// does not already exist. You can put, delete, or add attribute values. 
You -// can also perform a conditional update on an existing item (insert a new attribute -// name-value pair if it doesn't exist, or replace an existing name-value pair -// if it has certain expected attribute values). -// -// You can also return the item's attribute values in the same UpdateItem operation -// using the ReturnValues parameter. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation UpdateItem for usage and error information. -// -// Returned Error Types: -// -// - ConditionalCheckFailedException -// A condition specified in the operation could not be evaluated. -// -// - ProvisionedThroughputExceededException -// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB -// automatically retry requests that receive this exception. Your request is -// eventually successful, unless your retry queue is too large to finish. Reduce -// the frequency of requests and use exponential backoff. For more information, -// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) -// in the Amazon DynamoDB Developer Guide. -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// - ItemCollectionSizeLimitExceededException -// An item collection is too large. This exception is only returned for tables -// that have one or more local secondary indexes. -// -// - TransactionConflictException -// Operation was rejected because there is an ongoing transaction for the item. -// -// - RequestLimitExceeded -// Throughput exceeds the current throughput quota for your account. Please -// contact Amazon Web Services Support (https://aws.amazon.com/support) to request -// a quota increase. -// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateItem -func (c *DynamoDB) UpdateItem(input *UpdateItemInput) (*UpdateItemOutput, error) { - req, out := c.UpdateItemRequest(input) - return out, req.Send() -} - -// UpdateItemWithContext is the same as UpdateItem with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateItem for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) UpdateItemWithContext(ctx aws.Context, input *UpdateItemInput, opts ...request.Option) (*UpdateItemOutput, error) { - req, out := c.UpdateItemRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateKinesisStreamingDestination = "UpdateKinesisStreamingDestination" - -// UpdateKinesisStreamingDestinationRequest generates a "aws/request.Request" representing the -// client's request for the UpdateKinesisStreamingDestination operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. 
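// A hedged sketch of the conditional UpdateItem flow documented above,
// assuming a non-nil ctx per the WithContext contract; the table, key, and
// expressions are illustrative only:
//
//	out, err := svc.UpdateItemWithContext(ctx, &dynamodb.UpdateItemInput{
//		TableName: aws.String("Music"),
//		Key: map[string]*dynamodb.AttributeValue{
//			"Artist":    {S: aws.String("Acme Band")},
//			"SongTitle": {S: aws.String("Happy Day")},
//		},
//		UpdateExpression:    aws.String("SET Plays = Plays + :incr"),
//		ConditionExpression: aws.String("attribute_exists(Plays)"),
//		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
//			":incr": {N: aws.String("1")},
//		},
//		ReturnValues: aws.String(dynamodb.ReturnValueUpdatedNew),
//	})
//
// A failed condition surfaces as ErrCodeConditionalCheckFailedException.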
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateKinesisStreamingDestination for more information on using the UpdateKinesisStreamingDestination -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the UpdateKinesisStreamingDestinationRequest method. -// req, resp := client.UpdateKinesisStreamingDestinationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateKinesisStreamingDestination -func (c *DynamoDB) UpdateKinesisStreamingDestinationRequest(input *UpdateKinesisStreamingDestinationInput) (req *request.Request, output *UpdateKinesisStreamingDestinationOutput) { - op := &request.Operation{ - Name: opUpdateKinesisStreamingDestination, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateKinesisStreamingDestinationInput{} - } - - output = &UpdateKinesisStreamingDestinationOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// UpdateKinesisStreamingDestination API operation for Amazon DynamoDB. -// -// The command to update the Kinesis stream destination. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation UpdateKinesisStreamingDestination for usage and error information. -// -// Returned Error Types: -// -// - InternalServerError -// An error occurred on the server side. -// -// - LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. -// -// For most purposes, up to 500 simultaneous table operations are allowed per -// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, -// RestoreTableFromBackup, and RestoreTableToPointInTime. -// -// When you are creating a table with one or more secondary indexes, you can -// have up to 250 such requests running at a time. However, if the table or -// index specifications are complex, then DynamoDB might temporarily reduce -// the number of concurrent operations. -// -// When importing into DynamoDB, up to 50 simultaneous import table operations -// are allowed per account. -// -// There is a soft account quota of 2,500 tables. -// -// GetRecords was called with a value of more than 1000 for the limit request -// parameter. -// -// More than 2 processes are reading from the same streams shard at the same -// time. 
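// For concreteness, the Kinesis destination update described above takes a
// table name and a stream ARN; both values here are placeholders:
//
//	out, err := svc.UpdateKinesisStreamingDestination(&dynamodb.UpdateKinesisStreamingDestinationInput{
//		TableName: aws.String("Music"),
//		StreamArn: aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/example"),
//	})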
Exceeding this limit may result in request throttling. -// -// - ResourceInUseException -// The operation conflicts with the resource's availability. For example, you -// attempted to recreate an existing table, or tried to delete a table currently -// in the CREATING state. -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateKinesisStreamingDestination -func (c *DynamoDB) UpdateKinesisStreamingDestination(input *UpdateKinesisStreamingDestinationInput) (*UpdateKinesisStreamingDestinationOutput, error) { - req, out := c.UpdateKinesisStreamingDestinationRequest(input) - return out, req.Send() -} - -// UpdateKinesisStreamingDestinationWithContext is the same as UpdateKinesisStreamingDestination with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateKinesisStreamingDestination for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) UpdateKinesisStreamingDestinationWithContext(ctx aws.Context, input *UpdateKinesisStreamingDestinationInput, opts ...request.Option) (*UpdateKinesisStreamingDestinationOutput, error) { - req, out := c.UpdateKinesisStreamingDestinationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateTable = "UpdateTable" - -// UpdateTableRequest generates a "aws/request.Request" representing the -// client's request for the UpdateTable operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateTable for more information on using the UpdateTable -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the UpdateTableRequest method. -// req, resp := client.UpdateTableRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTable -func (c *DynamoDB) UpdateTableRequest(input *UpdateTableInput) (req *request.Request, output *UpdateTableOutput) { - op := &request.Operation{ - Name: opUpdateTable, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateTableInput{} - } - - output = &UpdateTableOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. 
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// UpdateTable API operation for Amazon DynamoDB. -// -// Modifies the provisioned throughput settings, global secondary indexes, or -// DynamoDB Streams settings for a given table. -// -// For global tables, this operation only applies to global tables using Version -// 2019.11.21 (Current version). -// -// You can only perform one of the following operations at once: -// -// - Modify the provisioned throughput settings of the table. -// -// - Remove a global secondary index from the table. -// -// - Create a new global secondary index on the table. After the index begins -// backfilling, you can use UpdateTable to perform other operations. -// -// UpdateTable is an asynchronous operation; while it's executing, the table -// status changes from ACTIVE to UPDATING. While it's UPDATING, you can't issue -// another UpdateTable request. When the table returns to the ACTIVE state, -// the UpdateTable operation is complete. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation UpdateTable for usage and error information. -// -// Returned Error Types: -// -// - ResourceInUseException -// The operation conflicts with the resource's availability. For example, you -// attempted to recreate an existing table, or tried to delete a table currently -// in the CREATING state. -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// - LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. -// -// For most purposes, up to 500 simultaneous table operations are allowed per -// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, -// RestoreTableFromBackup, and RestoreTableToPointInTime. -// -// When you are creating a table with one or more secondary indexes, you can -// have up to 250 such requests running at a time. However, if the table or -// index specifications are complex, then DynamoDB might temporarily reduce -// the number of concurrent operations. -// -// When importing into DynamoDB, up to 50 simultaneous import table operations -// are allowed per account. -// -// There is a soft account quota of 2,500 tables. -// -// GetRecords was called with a value of more than 1000 for the limit request -// parameter. -// -// More than 2 processes are reading from the same streams shard at the same -// time. Exceeding this limit may result in request throttling. -// -// - InternalServerError -// An error occurred on the server side. 
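// For concreteness, one of the single-at-a-time modifications listed above,
// a provisioned throughput change, with assumed values:
//
//	out, err := svc.UpdateTable(&dynamodb.UpdateTableInput{
//		TableName: aws.String("Music"),
//		ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
//			ReadCapacityUnits:  aws.Int64(10),
//			WriteCapacityUnits: aws.Int64(5),
//		},
//	})
//
// While the update is in flight the table status reads UPDATING, and further
// UpdateTable calls are rejected until it returns to ACTIVE.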
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTable -func (c *DynamoDB) UpdateTable(input *UpdateTableInput) (*UpdateTableOutput, error) { - req, out := c.UpdateTableRequest(input) - return out, req.Send() -} - -// UpdateTableWithContext is the same as UpdateTable with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateTable for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) UpdateTableWithContext(ctx aws.Context, input *UpdateTableInput, opts ...request.Option) (*UpdateTableOutput, error) { - req, out := c.UpdateTableRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateTableReplicaAutoScaling = "UpdateTableReplicaAutoScaling" - -// UpdateTableReplicaAutoScalingRequest generates a "aws/request.Request" representing the -// client's request for the UpdateTableReplicaAutoScaling operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateTableReplicaAutoScaling for more information on using the UpdateTableReplicaAutoScaling -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the UpdateTableReplicaAutoScalingRequest method. -// req, resp := client.UpdateTableReplicaAutoScalingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTableReplicaAutoScaling -func (c *DynamoDB) UpdateTableReplicaAutoScalingRequest(input *UpdateTableReplicaAutoScalingInput) (req *request.Request, output *UpdateTableReplicaAutoScalingOutput) { - op := &request.Operation{ - Name: opUpdateTableReplicaAutoScaling, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateTableReplicaAutoScalingInput{} - } - - output = &UpdateTableReplicaAutoScalingOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateTableReplicaAutoScaling API operation for Amazon DynamoDB. -// -// Updates auto scaling settings on your global tables at once. -// -// For global tables, this operation only applies to global tables using Version -// 2019.11.21 (Current version). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation UpdateTableReplicaAutoScaling for usage and error information. -// -// Returned Error Types: -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// - ResourceInUseException -// The operation conflicts with the resource's availability. 
For example, you -// attempted to recreate an existing table, or tried to delete a table currently -// in the CREATING state. -// -// - LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. -// -// For most purposes, up to 500 simultaneous table operations are allowed per -// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, -// RestoreTableFromBackup, and RestoreTableToPointInTime. -// -// When you are creating a table with one or more secondary indexes, you can -// have up to 250 such requests running at a time. However, if the table or -// index specifications are complex, then DynamoDB might temporarily reduce -// the number of concurrent operations. -// -// When importing into DynamoDB, up to 50 simultaneous import table operations -// are allowed per account. -// -// There is a soft account quota of 2,500 tables. -// -// GetRecords was called with a value of more than 1000 for the limit request -// parameter. -// -// More than 2 processes are reading from the same streams shard at the same -// time. Exceeding this limit may result in request throttling. -// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTableReplicaAutoScaling -func (c *DynamoDB) UpdateTableReplicaAutoScaling(input *UpdateTableReplicaAutoScalingInput) (*UpdateTableReplicaAutoScalingOutput, error) { - req, out := c.UpdateTableReplicaAutoScalingRequest(input) - return out, req.Send() -} - -// UpdateTableReplicaAutoScalingWithContext is the same as UpdateTableReplicaAutoScaling with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateTableReplicaAutoScaling for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) UpdateTableReplicaAutoScalingWithContext(ctx aws.Context, input *UpdateTableReplicaAutoScalingInput, opts ...request.Option) (*UpdateTableReplicaAutoScalingOutput, error) { - req, out := c.UpdateTableReplicaAutoScalingRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateTimeToLive = "UpdateTimeToLive" - -// UpdateTimeToLiveRequest generates a "aws/request.Request" representing the -// client's request for the UpdateTimeToLive operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateTimeToLive for more information on using the UpdateTimeToLive -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the UpdateTimeToLiveRequest method. 
-// req, resp := client.UpdateTimeToLiveRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTimeToLive -func (c *DynamoDB) UpdateTimeToLiveRequest(input *UpdateTimeToLiveInput) (req *request.Request, output *UpdateTimeToLiveOutput) { - op := &request.Operation{ - Name: opUpdateTimeToLive, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateTimeToLiveInput{} - } - - output = &UpdateTimeToLiveOutput{} - req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } - return -} - -// UpdateTimeToLive API operation for Amazon DynamoDB. -// -// The UpdateTimeToLive method enables or disables Time to Live (TTL) for the -// specified table. A successful UpdateTimeToLive call returns the current TimeToLiveSpecification. -// It can take up to one hour for the change to fully process. Any additional -// UpdateTimeToLive calls for the same table during this one hour duration result -// in a ValidationException. -// -// TTL compares the current time in epoch time format to the time stored in -// the TTL attribute of an item. If the epoch time value stored in the attribute -// is less than the current time, the item is marked as expired and subsequently -// deleted. -// -// The epoch time format is the number of seconds elapsed since 12:00:00 AM -// January 1, 1970 UTC. -// -// DynamoDB deletes expired items on a best-effort basis to ensure availability -// of throughput for other data operations. -// -// DynamoDB typically deletes expired items within two days of expiration. The -// exact duration within which an item gets deleted after expiration is specific -// to the nature of the workload. Items that have expired and not been deleted -// will still show up in reads, queries, and scans. -// -// As items are deleted, they are removed from any local secondary index and -// global secondary index immediately in the same eventually consistent way -// as a standard delete operation. -// -// For more information, see Time To Live (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/TTL.html) -// in the Amazon DynamoDB Developer Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon DynamoDB's -// API operation UpdateTimeToLive for usage and error information. -// -// Returned Error Types: -// -// - ResourceInUseException -// The operation conflicts with the resource's availability. For example, you -// attempted to recreate an existing table, or tried to delete a table currently -// in the CREATING state. -// -// - ResourceNotFoundException -// The operation tried to access a nonexistent table or index. 
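// A minimal sketch of enabling TTL as described above; "expiresAt" is an
// assumed attribute holding epoch seconds on a hypothetical table:
//
//	out, err := svc.UpdateTimeToLive(&dynamodb.UpdateTimeToLiveInput{
//		TableName: aws.String("Music"),
//		TimeToLiveSpecification: &dynamodb.TimeToLiveSpecification{
//			AttributeName: aws.String("expiresAt"),
//			Enabled:       aws.Bool(true),
//		},
//	})
//
// Calling again for the same table within the hour yields a ValidationException.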
The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// - LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. -// -// For most purposes, up to 500 simultaneous table operations are allowed per -// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, -// RestoreTableFromBackup, and RestoreTableToPointInTime. -// -// When you are creating a table with one or more secondary indexes, you can -// have up to 250 such requests running at a time. However, if the table or -// index specifications are complex, then DynamoDB might temporarily reduce -// the number of concurrent operations. -// -// When importing into DynamoDB, up to 50 simultaneous import table operations -// are allowed per account. -// -// There is a soft account quota of 2,500 tables. -// -// GetRecords was called with a value of more than 1000 for the limit request -// parameter. -// -// More than 2 processes are reading from the same streams shard at the same -// time. Exceeding this limit may result in request throttling. -// -// - InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTimeToLive -func (c *DynamoDB) UpdateTimeToLive(input *UpdateTimeToLiveInput) (*UpdateTimeToLiveOutput, error) { - req, out := c.UpdateTimeToLiveRequest(input) - return out, req.Send() -} - -// UpdateTimeToLiveWithContext is the same as UpdateTimeToLive with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateTimeToLive for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) UpdateTimeToLiveWithContext(ctx aws.Context, input *UpdateTimeToLiveInput, opts ...request.Option) (*UpdateTimeToLiveOutput, error) { - req, out := c.UpdateTimeToLiveRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// Contains details of a table archival operation. -type ArchivalSummary struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the backup the table was archived to, when - // applicable in the archival reason. If you wish to restore this backup to - // the same table name, you will need to delete the original table. - ArchivalBackupArn *string `min:"37" type:"string"` - - // The date and time when table archival was initiated by DynamoDB, in UNIX - // epoch time format. - ArchivalDateTime *time.Time `type:"timestamp"` - - // The reason DynamoDB archived the table. Currently, the only possible value - // is: - // - // * INACCESSIBLE_ENCRYPTION_CREDENTIALS - The table was archived due to - // the table's KMS key being inaccessible for more than seven days. An On-Demand - // backup was created at the archival time. - ArchivalReason *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ArchivalSummary) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ArchivalSummary) GoString() string { - return s.String() -} - -// SetArchivalBackupArn sets the ArchivalBackupArn field's value. -func (s *ArchivalSummary) SetArchivalBackupArn(v string) *ArchivalSummary { - s.ArchivalBackupArn = &v - return s -} - -// SetArchivalDateTime sets the ArchivalDateTime field's value. -func (s *ArchivalSummary) SetArchivalDateTime(v time.Time) *ArchivalSummary { - s.ArchivalDateTime = &v - return s -} - -// SetArchivalReason sets the ArchivalReason field's value. -func (s *ArchivalSummary) SetArchivalReason(v string) *ArchivalSummary { - s.ArchivalReason = &v - return s -} - -// Represents an attribute for describing the schema for the table and indexes. -type AttributeDefinition struct { - _ struct{} `type:"structure"` - - // A name for the attribute. - // - // AttributeName is a required field - AttributeName *string `min:"1" type:"string" required:"true"` - - // The data type for the attribute, where: - // - // * S - the attribute is of type String - // - // * N - the attribute is of type Number - // - // * B - the attribute is of type Binary - // - // AttributeType is a required field - AttributeType *string `type:"string" required:"true" enum:"ScalarAttributeType"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AttributeDefinition) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AttributeDefinition) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AttributeDefinition) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AttributeDefinition"} - if s.AttributeName == nil { - invalidParams.Add(request.NewErrParamRequired("AttributeName")) - } - if s.AttributeName != nil && len(*s.AttributeName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AttributeName", 1)) - } - if s.AttributeType == nil { - invalidParams.Add(request.NewErrParamRequired("AttributeType")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAttributeName sets the AttributeName field's value. -func (s *AttributeDefinition) SetAttributeName(v string) *AttributeDefinition { - s.AttributeName = &v - return s -} - -// SetAttributeType sets the AttributeType field's value. -func (s *AttributeDefinition) SetAttributeType(v string) *AttributeDefinition { - s.AttributeType = &v - return s -} - -// Represents the data for an attribute. -// -// Each attribute value is described as a name-value pair. The name is the data -// type, and the value is the data itself. -// -// For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes) -// in the Amazon DynamoDB Developer Guide. -type AttributeValue struct { - _ struct{} `type:"structure"` - - // An attribute of type Binary. 
For example: - // - // "B": "dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk" - // B is automatically base64 encoded/decoded by the SDK. - B []byte `type:"blob"` - - // An attribute of type Boolean. For example: - // - // "BOOL": true - BOOL *bool `type:"boolean"` - - // An attribute of type Binary Set. For example: - // - // "BS": ["U3Vubnk=", "UmFpbnk=", "U25vd3k="] - BS [][]byte `type:"list"` - - // An attribute of type List. For example: - // - // "L": [ {"S": "Cookies"} , {"S": "Coffee"}, {"N": "3.14159"}] - L []*AttributeValue `type:"list"` - - // An attribute of type Map. For example: - // - // "M": {"Name": {"S": "Joe"}, "Age": {"N": "35"}} - M map[string]*AttributeValue `type:"map"` - - // An attribute of type Number. For example: - // - // "N": "123.45" - // - // Numbers are sent across the network to DynamoDB as strings, to maximize compatibility - // across languages and libraries. However, DynamoDB treats them as number type - // attributes for mathematical operations. - N *string `type:"string"` - - // An attribute of type Number Set. For example: - // - // "NS": ["42.2", "-19", "7.5", "3.14"] - // - // Numbers are sent across the network to DynamoDB as strings, to maximize compatibility - // across languages and libraries. However, DynamoDB treats them as number type - // attributes for mathematical operations. - NS []*string `type:"list"` - - // An attribute of type Null. For example: - // - // "NULL": true - NULL *bool `type:"boolean"` - - // An attribute of type String. For example: - // - // "S": "Hello" - S *string `type:"string"` - - // An attribute of type String Set. For example: - // - // "SS": ["Giraffe", "Hippo" ,"Zebra"] - SS []*string `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AttributeValue) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AttributeValue) GoString() string { - return s.String() -} - -// SetB sets the B field's value. -func (s *AttributeValue) SetB(v []byte) *AttributeValue { - s.B = v - return s -} - -// SetBOOL sets the BOOL field's value. -func (s *AttributeValue) SetBOOL(v bool) *AttributeValue { - s.BOOL = &v - return s -} - -// SetBS sets the BS field's value. -func (s *AttributeValue) SetBS(v [][]byte) *AttributeValue { - s.BS = v - return s -} - -// SetL sets the L field's value. -func (s *AttributeValue) SetL(v []*AttributeValue) *AttributeValue { - s.L = v - return s -} - -// SetM sets the M field's value. -func (s *AttributeValue) SetM(v map[string]*AttributeValue) *AttributeValue { - s.M = v - return s -} - -// SetN sets the N field's value. -func (s *AttributeValue) SetN(v string) *AttributeValue { - s.N = &v - return s -} - -// SetNS sets the NS field's value. -func (s *AttributeValue) SetNS(v []*string) *AttributeValue { - s.NS = v - return s -} - -// SetNULL sets the NULL field's value. -func (s *AttributeValue) SetNULL(v bool) *AttributeValue { - s.NULL = &v - return s -} - -// SetS sets the S field's value. -func (s *AttributeValue) SetS(v string) *AttributeValue { - s.S = &v - return s -} - -// SetSS sets the SS field's value. 
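// Putting the tagged-union shape above together, a single item mixing the
// scalar and set variants (all values illustrative):
//
//	item := map[string]*dynamodb.AttributeValue{
//		"Artist": {S: aws.String("Acme Band")},
//		"Plays":  {N: aws.String("42")}, // numbers travel as strings
//		"OnTour": {BOOL: aws.Bool(true)},
//		"Genres": {SS: []*string{aws.String("rock"), aws.String("pop")}},
//	}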
-func (s *AttributeValue) SetSS(v []*string) *AttributeValue { - s.SS = v - return s -} - -// For the UpdateItem operation, represents the attributes to be modified, the -// action to perform on each, and the new value for each. -// -// You cannot use UpdateItem to update any primary key attributes. Instead, -// you will need to delete the item, and then use PutItem to create a new item -// with new attributes. -// -// Attribute values cannot be null; string and binary type attributes must have -// lengths greater than zero; and set type attributes must not be empty. Requests -// with empty values will be rejected with a ValidationException exception. -type AttributeValueUpdate struct { - _ struct{} `type:"structure"` - - // Specifies how to perform the update. Valid values are PUT (default), DELETE, - // and ADD. The behavior depends on whether the specified primary key already - // exists in the table. - // - // If an item with the specified Key is found in the table: - // - // * PUT - Adds the specified attribute to the item. If the attribute already - // exists, it is replaced by the new value. - // - // * DELETE - If no value is specified, the attribute and its value are removed - // from the item. The data type of the specified value must match the existing - // value's data type. If a set of values is specified, then those values - // are subtracted from the old set. For example, if the attribute value was - // the set [a,b,c] and the DELETE action specified [a,c], then the final - // attribute value would be [b]. Specifying an empty set is an error. - // - // * ADD - If the attribute does not already exist, then the attribute and - // its values are added to the item. If the attribute does exist, then the - // behavior of ADD depends on the data type of the attribute: If the existing - // attribute is a number, and if Value is also a number, then the Value is - // mathematically added to the existing attribute. If Value is a negative - // number, then it is subtracted from the existing attribute. If you use - // ADD to increment or decrement a number value for an item that doesn't - // exist before the update, DynamoDB uses 0 as the initial value. In addition, - // if you use ADD to update an existing item, and intend to increment or - // decrement an attribute value which does not yet exist, DynamoDB uses 0 - // as the initial value. For example, suppose that the item you want to update - // does not yet have an attribute named itemcount, but you decide to ADD - // the number 3 to this attribute anyway, even though it currently does not - // exist. DynamoDB will create the itemcount attribute, set its initial value - // to 0, and finally add 3 to it. The result will be a new itemcount attribute - // in the item, with a value of 3. If the existing data type is a set, and - // if the Value is also a set, then the Value is added to the existing set. - // (This is a set operation, not mathematical addition.) For example, if - // the attribute value was the set [1,2], and the ADD action specified [3], - // then the final attribute value would be [1,2,3]. An error occurs if an - // Add action is specified for a set attribute and the attribute type specified - // does not match the existing set type. Both sets must have the same primitive - // data type. For example, if the existing data type is a set of strings, - // the Value must also be a set of strings. The same holds true for number - // sets and binary sets. 
This action is only valid for an existing attribute - // whose data type is number or is a set. Do not use ADD for any other data - // types. - // - // If no item with the specified Key is found: - // - // * PUT - DynamoDB creates a new item with the specified primary key, and - // then adds the attribute. - // - // * DELETE - Nothing happens; there is no attribute to delete. - // - // * ADD - DynamoDB creates a new item with the supplied primary key and - // number (or set) for the attribute value. The only data types allowed are - // number, number set, string set or binary set. - Action *string `type:"string" enum:"AttributeAction"` - - // Represents the data for an attribute. - // - // Each attribute value is described as a name-value pair. The name is the data - // type, and the value is the data itself. - // - // For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes) - // in the Amazon DynamoDB Developer Guide. - Value *AttributeValue `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AttributeValueUpdate) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AttributeValueUpdate) GoString() string { - return s.String() -} - -// SetAction sets the Action field's value. -func (s *AttributeValueUpdate) SetAction(v string) *AttributeValueUpdate { - s.Action = &v - return s -} - -// SetValue sets the Value field's value. -func (s *AttributeValueUpdate) SetValue(v *AttributeValue) *AttributeValueUpdate { - s.Value = v - return s -} - -// Represents the properties of the scaling policy. -type AutoScalingPolicyDescription struct { - _ struct{} `type:"structure"` - - // The name of the scaling policy. - PolicyName *string `min:"1" type:"string"` - - // Represents a target tracking scaling policy configuration. - TargetTrackingScalingPolicyConfiguration *AutoScalingTargetTrackingScalingPolicyConfigurationDescription `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AutoScalingPolicyDescription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AutoScalingPolicyDescription) GoString() string { - return s.String() -} - -// SetPolicyName sets the PolicyName field's value. -func (s *AutoScalingPolicyDescription) SetPolicyName(v string) *AutoScalingPolicyDescription { - s.PolicyName = &v - return s -} - -// SetTargetTrackingScalingPolicyConfiguration sets the TargetTrackingScalingPolicyConfiguration field's value. 
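// The ADD arithmetic described for AttributeValueUpdate above, in miniature;
// the attribute name mirrors the itemcount example and is hypothetical:
//
//	update := map[string]*dynamodb.AttributeValueUpdate{
//		"itemcount": {
//			Action: aws.String(dynamodb.AttributeActionAdd),
//			Value:  &dynamodb.AttributeValue{N: aws.String("3")},
//		},
//	}
//
// Against an item without itemcount, DynamoDB seeds the attribute with 0 and
// then adds 3, leaving 3.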
-func (s *AutoScalingPolicyDescription) SetTargetTrackingScalingPolicyConfiguration(v *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) *AutoScalingPolicyDescription { - s.TargetTrackingScalingPolicyConfiguration = v - return s -} - -// Represents the auto scaling policy to be modified. -type AutoScalingPolicyUpdate struct { - _ struct{} `type:"structure"` - - // The name of the scaling policy. - PolicyName *string `min:"1" type:"string"` - - // Represents a target tracking scaling policy configuration. - // - // TargetTrackingScalingPolicyConfiguration is a required field - TargetTrackingScalingPolicyConfiguration *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate `type:"structure" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AutoScalingPolicyUpdate) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AutoScalingPolicyUpdate) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AutoScalingPolicyUpdate) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AutoScalingPolicyUpdate"} - if s.PolicyName != nil && len(*s.PolicyName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) - } - if s.TargetTrackingScalingPolicyConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("TargetTrackingScalingPolicyConfiguration")) - } - if s.TargetTrackingScalingPolicyConfiguration != nil { - if err := s.TargetTrackingScalingPolicyConfiguration.Validate(); err != nil { - invalidParams.AddNested("TargetTrackingScalingPolicyConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPolicyName sets the PolicyName field's value. -func (s *AutoScalingPolicyUpdate) SetPolicyName(v string) *AutoScalingPolicyUpdate { - s.PolicyName = &v - return s -} - -// SetTargetTrackingScalingPolicyConfiguration sets the TargetTrackingScalingPolicyConfiguration field's value. -func (s *AutoScalingPolicyUpdate) SetTargetTrackingScalingPolicyConfiguration(v *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) *AutoScalingPolicyUpdate { - s.TargetTrackingScalingPolicyConfiguration = v - return s -} - -// Represents the auto scaling settings for a global table or global secondary -// index. -type AutoScalingSettingsDescription struct { - _ struct{} `type:"structure"` - - // Disabled auto scaling for this global table or global secondary index. - AutoScalingDisabled *bool `type:"boolean"` - - // Role ARN used for configuring the auto scaling policy. - AutoScalingRoleArn *string `type:"string"` - - // The maximum capacity units that a global table or global secondary index - // should be scaled up to. - MaximumUnits *int64 `min:"1" type:"long"` - - // The minimum capacity units that a global table or global secondary index - // should be scaled down to. - MinimumUnits *int64 `min:"1" type:"long"` - - // Information about the scaling policies. 
- ScalingPolicies []*AutoScalingPolicyDescription `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AutoScalingSettingsDescription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AutoScalingSettingsDescription) GoString() string { - return s.String() -} - -// SetAutoScalingDisabled sets the AutoScalingDisabled field's value. -func (s *AutoScalingSettingsDescription) SetAutoScalingDisabled(v bool) *AutoScalingSettingsDescription { - s.AutoScalingDisabled = &v - return s -} - -// SetAutoScalingRoleArn sets the AutoScalingRoleArn field's value. -func (s *AutoScalingSettingsDescription) SetAutoScalingRoleArn(v string) *AutoScalingSettingsDescription { - s.AutoScalingRoleArn = &v - return s -} - -// SetMaximumUnits sets the MaximumUnits field's value. -func (s *AutoScalingSettingsDescription) SetMaximumUnits(v int64) *AutoScalingSettingsDescription { - s.MaximumUnits = &v - return s -} - -// SetMinimumUnits sets the MinimumUnits field's value. -func (s *AutoScalingSettingsDescription) SetMinimumUnits(v int64) *AutoScalingSettingsDescription { - s.MinimumUnits = &v - return s -} - -// SetScalingPolicies sets the ScalingPolicies field's value. -func (s *AutoScalingSettingsDescription) SetScalingPolicies(v []*AutoScalingPolicyDescription) *AutoScalingSettingsDescription { - s.ScalingPolicies = v - return s -} - -// Represents the auto scaling settings to be modified for a global table or -// global secondary index. -type AutoScalingSettingsUpdate struct { - _ struct{} `type:"structure"` - - // Disabled auto scaling for this global table or global secondary index. - AutoScalingDisabled *bool `type:"boolean"` - - // Role ARN used for configuring auto scaling policy. - AutoScalingRoleArn *string `min:"1" type:"string"` - - // The maximum capacity units that a global table or global secondary index - // should be scaled up to. - MaximumUnits *int64 `min:"1" type:"long"` - - // The minimum capacity units that a global table or global secondary index - // should be scaled down to. - MinimumUnits *int64 `min:"1" type:"long"` - - // The scaling policy to apply for scaling target global table or global secondary - // index capacity units. - ScalingPolicyUpdate *AutoScalingPolicyUpdate `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AutoScalingSettingsUpdate) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AutoScalingSettingsUpdate) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *AutoScalingSettingsUpdate) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AutoScalingSettingsUpdate"} - if s.AutoScalingRoleArn != nil && len(*s.AutoScalingRoleArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AutoScalingRoleArn", 1)) - } - if s.MaximumUnits != nil && *s.MaximumUnits < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaximumUnits", 1)) - } - if s.MinimumUnits != nil && *s.MinimumUnits < 1 { - invalidParams.Add(request.NewErrParamMinValue("MinimumUnits", 1)) - } - if s.ScalingPolicyUpdate != nil { - if err := s.ScalingPolicyUpdate.Validate(); err != nil { - invalidParams.AddNested("ScalingPolicyUpdate", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAutoScalingDisabled sets the AutoScalingDisabled field's value. -func (s *AutoScalingSettingsUpdate) SetAutoScalingDisabled(v bool) *AutoScalingSettingsUpdate { - s.AutoScalingDisabled = &v - return s -} - -// SetAutoScalingRoleArn sets the AutoScalingRoleArn field's value. -func (s *AutoScalingSettingsUpdate) SetAutoScalingRoleArn(v string) *AutoScalingSettingsUpdate { - s.AutoScalingRoleArn = &v - return s -} - -// SetMaximumUnits sets the MaximumUnits field's value. -func (s *AutoScalingSettingsUpdate) SetMaximumUnits(v int64) *AutoScalingSettingsUpdate { - s.MaximumUnits = &v - return s -} - -// SetMinimumUnits sets the MinimumUnits field's value. -func (s *AutoScalingSettingsUpdate) SetMinimumUnits(v int64) *AutoScalingSettingsUpdate { - s.MinimumUnits = &v - return s -} - -// SetScalingPolicyUpdate sets the ScalingPolicyUpdate field's value. -func (s *AutoScalingSettingsUpdate) SetScalingPolicyUpdate(v *AutoScalingPolicyUpdate) *AutoScalingSettingsUpdate { - s.ScalingPolicyUpdate = v - return s -} - -// Represents the properties of a target tracking scaling policy. -type AutoScalingTargetTrackingScalingPolicyConfigurationDescription struct { - _ struct{} `type:"structure"` - - // Indicates whether scale in by the target tracking policy is disabled. If - // the value is true, scale in is disabled and the target tracking policy won't - // remove capacity from the scalable resource. Otherwise, scale in is enabled - // and the target tracking policy can remove capacity from the scalable resource. - // The default value is false. - DisableScaleIn *bool `type:"boolean"` - - // The amount of time, in seconds, after a scale in activity completes before - // another scale in activity can start. The cooldown period is used to block - // subsequent scale in requests until it has expired. You should scale in conservatively - // to protect your application's availability. However, if another alarm triggers - // a scale out policy during the cooldown period after a scale-in, application - // auto scaling scales out your scalable target immediately. - ScaleInCooldown *int64 `type:"integer"` - - // The amount of time, in seconds, after a scale out activity completes before - // another scale out activity can start. While the cooldown period is in effect, - // the capacity that has been added by the previous scale out event that initiated - // the cooldown is calculated as part of the desired capacity for the next scale - // out. You should continuously (but not excessively) scale out. - ScaleOutCooldown *int64 `type:"integer"` - - // The target value for the metric. The range is 8.515920e-109 to 1.174271e+108 - // (Base 10) or 2e-360 to 2e360 (Base 2). 
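Because the Update variants carry client-side constraints (minimum lengths, a required nested TargetValue), the generated Validate methods can be exercised before any request is sent. A sketch with illustrative limits of 5 to 500 capacity units and a 70 percent utilization target:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	// Nested Validate calls propagate: a missing TargetValue inside the
	// target tracking update would surface as an ErrInvalidParams here.
	update := (&dynamodb.AutoScalingSettingsUpdate{}).
		SetMinimumUnits(5).
		SetMaximumUnits(500).
		SetScalingPolicyUpdate((&dynamodb.AutoScalingPolicyUpdate{}).
			SetTargetTrackingScalingPolicyConfiguration(
				(&dynamodb.AutoScalingTargetTrackingScalingPolicyConfigurationUpdate{}).
					SetTargetValue(70.0).
					SetScaleInCooldown(60)))

	if err := update.Validate(); err != nil {
		fmt.Println("invalid auto scaling settings:", err)
		return
	}
	fmt.Println("settings pass client-side validation")
}
```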
- // - // TargetValue is a required field - TargetValue *float64 `type:"double" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AutoScalingTargetTrackingScalingPolicyConfigurationDescription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AutoScalingTargetTrackingScalingPolicyConfigurationDescription) GoString() string { - return s.String() -} - -// SetDisableScaleIn sets the DisableScaleIn field's value. -func (s *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) SetDisableScaleIn(v bool) *AutoScalingTargetTrackingScalingPolicyConfigurationDescription { - s.DisableScaleIn = &v - return s -} - -// SetScaleInCooldown sets the ScaleInCooldown field's value. -func (s *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) SetScaleInCooldown(v int64) *AutoScalingTargetTrackingScalingPolicyConfigurationDescription { - s.ScaleInCooldown = &v - return s -} - -// SetScaleOutCooldown sets the ScaleOutCooldown field's value. -func (s *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) SetScaleOutCooldown(v int64) *AutoScalingTargetTrackingScalingPolicyConfigurationDescription { - s.ScaleOutCooldown = &v - return s -} - -// SetTargetValue sets the TargetValue field's value. -func (s *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) SetTargetValue(v float64) *AutoScalingTargetTrackingScalingPolicyConfigurationDescription { - s.TargetValue = &v - return s -} - -// Represents the settings of a target tracking scaling policy that will be -// modified. -type AutoScalingTargetTrackingScalingPolicyConfigurationUpdate struct { - _ struct{} `type:"structure"` - - // Indicates whether scale in by the target tracking policy is disabled. If - // the value is true, scale in is disabled and the target tracking policy won't - // remove capacity from the scalable resource. Otherwise, scale in is enabled - // and the target tracking policy can remove capacity from the scalable resource. - // The default value is false. - DisableScaleIn *bool `type:"boolean"` - - // The amount of time, in seconds, after a scale in activity completes before - // another scale in activity can start. The cooldown period is used to block - // subsequent scale in requests until it has expired. You should scale in conservatively - // to protect your application's availability. However, if another alarm triggers - // a scale out policy during the cooldown period after a scale-in, application - // auto scaling scales out your scalable target immediately. - ScaleInCooldown *int64 `type:"integer"` - - // The amount of time, in seconds, after a scale out activity completes before - // another scale out activity can start. While the cooldown period is in effect, - // the capacity that has been added by the previous scale out event that initiated - // the cooldown is calculated as part of the desired capacity for the next scale - // out. You should continuously (but not excessively) scale out. - ScaleOutCooldown *int64 `type:"integer"` - - // The target value for the metric. 
The range is 8.515920e-109 to 1.174271e+108 - // (Base 10) or 2e-360 to 2e360 (Base 2). - // - // TargetValue is a required field - TargetValue *float64 `type:"double" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AutoScalingTargetTrackingScalingPolicyConfigurationUpdate"} - if s.TargetValue == nil { - invalidParams.Add(request.NewErrParamRequired("TargetValue")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDisableScaleIn sets the DisableScaleIn field's value. -func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) SetDisableScaleIn(v bool) *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate { - s.DisableScaleIn = &v - return s -} - -// SetScaleInCooldown sets the ScaleInCooldown field's value. -func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) SetScaleInCooldown(v int64) *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate { - s.ScaleInCooldown = &v - return s -} - -// SetScaleOutCooldown sets the ScaleOutCooldown field's value. -func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) SetScaleOutCooldown(v int64) *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate { - s.ScaleOutCooldown = &v - return s -} - -// SetTargetValue sets the TargetValue field's value. -func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) SetTargetValue(v float64) *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate { - s.TargetValue = &v - return s -} - -// Contains the description of the backup created for the table. -type BackupDescription struct { - _ struct{} `type:"structure"` - - // Contains the details of the backup created for the table. - BackupDetails *BackupDetails `type:"structure"` - - // Contains the details of the table when the backup was created. - SourceTableDetails *SourceTableDetails `type:"structure"` - - // Contains the details of the features enabled on the table when the backup - // was created. For example, LSIs, GSIs, streams, TTL. - SourceTableFeatureDetails *SourceTableFeatureDetails `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BackupDescription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s BackupDescription) GoString() string { - return s.String() -} - -// SetBackupDetails sets the BackupDetails field's value. -func (s *BackupDescription) SetBackupDetails(v *BackupDetails) *BackupDescription { - s.BackupDetails = v - return s -} - -// SetSourceTableDetails sets the SourceTableDetails field's value. -func (s *BackupDescription) SetSourceTableDetails(v *SourceTableDetails) *BackupDescription { - s.SourceTableDetails = v - return s -} - -// SetSourceTableFeatureDetails sets the SourceTableFeatureDetails field's value. -func (s *BackupDescription) SetSourceTableFeatureDetails(v *SourceTableFeatureDetails) *BackupDescription { - s.SourceTableFeatureDetails = v - return s -} - -// Contains the details of the backup created for the table. -type BackupDetails struct { - _ struct{} `type:"structure"` - - // ARN associated with the backup. - // - // BackupArn is a required field - BackupArn *string `min:"37" type:"string" required:"true"` - - // Time at which the backup was created. This is the request time of the backup. - // - // BackupCreationDateTime is a required field - BackupCreationDateTime *time.Time `type:"timestamp" required:"true"` - - // Time at which the automatic on-demand backup created by DynamoDB will expire. - // This SYSTEM on-demand backup expires automatically 35 days after its creation. - BackupExpiryDateTime *time.Time `type:"timestamp"` - - // Name of the requested backup. - // - // BackupName is a required field - BackupName *string `min:"3" type:"string" required:"true"` - - // Size of the backup in bytes. DynamoDB updates this value approximately every - // six hours. Recent changes might not be reflected in this value. - BackupSizeBytes *int64 `type:"long"` - - // Backup can be in one of the following states: CREATING, ACTIVE, DELETED. - // - // BackupStatus is a required field - BackupStatus *string `type:"string" required:"true" enum:"BackupStatus"` - - // BackupType: - // - // * USER - You create and manage these using the on-demand backup feature. - // - // * SYSTEM - If you delete a table with point-in-time recovery enabled, - // a SYSTEM backup is automatically created and is retained for 35 days (at - // no additional cost). System backups allow you to restore the deleted table - // to the state it was in just before the point of deletion. - // - // * AWS_BACKUP - On-demand backup created by you from Backup service. - // - // BackupType is a required field - BackupType *string `type:"string" required:"true" enum:"BackupType"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BackupDetails) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BackupDetails) GoString() string { - return s.String() -} - -// SetBackupArn sets the BackupArn field's value. -func (s *BackupDetails) SetBackupArn(v string) *BackupDetails { - s.BackupArn = &v - return s -} - -// SetBackupCreationDateTime sets the BackupCreationDateTime field's value. 
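The description types above are read-only views returned by the control-plane calls. A hedged sketch that fetches one backup's metadata with DescribeBackup and prints the BackupDetails fields documented above; the backup ARN is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.DescribeBackup(&dynamodb.DescribeBackupInput{
		BackupArn: aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/Music/backup/01489602797149-73d8d5bc"),
	})
	if err != nil {
		fmt.Println("DescribeBackup failed:", err)
		return
	}

	// BackupDetails carries the ARN, timestamps, status, and size noted above.
	d := out.BackupDescription.BackupDetails
	fmt.Printf("%s: status=%s size=%dB\n",
		aws.StringValue(d.BackupName),
		aws.StringValue(d.BackupStatus),
		aws.Int64Value(d.BackupSizeBytes))
}
```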
-func (s *BackupDetails) SetBackupCreationDateTime(v time.Time) *BackupDetails { - s.BackupCreationDateTime = &v - return s -} - -// SetBackupExpiryDateTime sets the BackupExpiryDateTime field's value. -func (s *BackupDetails) SetBackupExpiryDateTime(v time.Time) *BackupDetails { - s.BackupExpiryDateTime = &v - return s -} - -// SetBackupName sets the BackupName field's value. -func (s *BackupDetails) SetBackupName(v string) *BackupDetails { - s.BackupName = &v - return s -} - -// SetBackupSizeBytes sets the BackupSizeBytes field's value. -func (s *BackupDetails) SetBackupSizeBytes(v int64) *BackupDetails { - s.BackupSizeBytes = &v - return s -} - -// SetBackupStatus sets the BackupStatus field's value. -func (s *BackupDetails) SetBackupStatus(v string) *BackupDetails { - s.BackupStatus = &v - return s -} - -// SetBackupType sets the BackupType field's value. -func (s *BackupDetails) SetBackupType(v string) *BackupDetails { - s.BackupType = &v - return s -} - -// There is another ongoing conflicting backup control plane operation on the -// table. The backup is either being created, deleted or restored to a table. -type BackupInUseException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BackupInUseException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BackupInUseException) GoString() string { - return s.String() -} - -func newErrorBackupInUseException(v protocol.ResponseMetadata) error { - return &BackupInUseException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *BackupInUseException) Code() string { - return "BackupInUseException" -} - -// Message returns the exception's message. -func (s *BackupInUseException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *BackupInUseException) OrigErr() error { - return nil -} - -func (s *BackupInUseException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *BackupInUseException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *BackupInUseException) RequestID() string { - return s.RespMetadata.RequestID -} - -// Backup not found for the given BackupARN. -type BackupNotFoundException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
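The modeled exceptions above still surface through the v1 awserr interface, with Code() matching the generated ErrCode constants. A sketch that starts a backup and branches on the error code; the table and backup names are illustrative:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.CreateBackup(&dynamodb.CreateBackupInput{
		TableName:  aws.String("Music"),
		BackupName: aws.String("music-pre-migration"),
	})
	if err == nil {
		fmt.Println("backup ARN:", aws.StringValue(out.BackupDetails.BackupArn))
		return
	}

	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case dynamodb.ErrCodeBackupInUseException:
			// A conflicting backup control-plane operation is in flight.
			fmt.Println("backup busy, retry later:", aerr.Message())
		case dynamodb.ErrCodeTableNotFoundException:
			fmt.Println("no such table:", aerr.Message())
		default:
			fmt.Println("CreateBackup failed:", aerr)
		}
	}
}
```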
-func (s BackupNotFoundException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BackupNotFoundException) GoString() string { - return s.String() -} - -func newErrorBackupNotFoundException(v protocol.ResponseMetadata) error { - return &BackupNotFoundException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *BackupNotFoundException) Code() string { - return "BackupNotFoundException" -} - -// Message returns the exception's message. -func (s *BackupNotFoundException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *BackupNotFoundException) OrigErr() error { - return nil -} - -func (s *BackupNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *BackupNotFoundException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *BackupNotFoundException) RequestID() string { - return s.RespMetadata.RequestID -} - -// Contains details for the backup. -type BackupSummary struct { - _ struct{} `type:"structure"` - - // ARN associated with the backup. - BackupArn *string `min:"37" type:"string"` - - // Time at which the backup was created. - BackupCreationDateTime *time.Time `type:"timestamp"` - - // Time at which the automatic on-demand backup created by DynamoDB will expire. - // This SYSTEM on-demand backup expires automatically 35 days after its creation. - BackupExpiryDateTime *time.Time `type:"timestamp"` - - // Name of the specified backup. - BackupName *string `min:"3" type:"string"` - - // Size of the backup in bytes. - BackupSizeBytes *int64 `type:"long"` - - // Backup can be in one of the following states: CREATING, ACTIVE, DELETED. - BackupStatus *string `type:"string" enum:"BackupStatus"` - - // BackupType: - // - // * USER - You create and manage these using the on-demand backup feature. - // - // * SYSTEM - If you delete a table with point-in-time recovery enabled, - // a SYSTEM backup is automatically created and is retained for 35 days (at - // no additional cost). System backups allow you to restore the deleted table - // to the state it was in just before the point of deletion. - // - // * AWS_BACKUP - On-demand backup created by you from Backup service. - BackupType *string `type:"string" enum:"BackupType"` - - // ARN associated with the table. - TableArn *string `min:"1" type:"string"` - - // Unique identifier for the table. - TableId *string `type:"string"` - - // Name of the table. - TableName *string `min:"3" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BackupSummary) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s BackupSummary) GoString() string { - return s.String() -} - -// SetBackupArn sets the BackupArn field's value. -func (s *BackupSummary) SetBackupArn(v string) *BackupSummary { - s.BackupArn = &v - return s -} - -// SetBackupCreationDateTime sets the BackupCreationDateTime field's value. -func (s *BackupSummary) SetBackupCreationDateTime(v time.Time) *BackupSummary { - s.BackupCreationDateTime = &v - return s -} - -// SetBackupExpiryDateTime sets the BackupExpiryDateTime field's value. -func (s *BackupSummary) SetBackupExpiryDateTime(v time.Time) *BackupSummary { - s.BackupExpiryDateTime = &v - return s -} - -// SetBackupName sets the BackupName field's value. -func (s *BackupSummary) SetBackupName(v string) *BackupSummary { - s.BackupName = &v - return s -} - -// SetBackupSizeBytes sets the BackupSizeBytes field's value. -func (s *BackupSummary) SetBackupSizeBytes(v int64) *BackupSummary { - s.BackupSizeBytes = &v - return s -} - -// SetBackupStatus sets the BackupStatus field's value. -func (s *BackupSummary) SetBackupStatus(v string) *BackupSummary { - s.BackupStatus = &v - return s -} - -// SetBackupType sets the BackupType field's value. -func (s *BackupSummary) SetBackupType(v string) *BackupSummary { - s.BackupType = &v - return s -} - -// SetTableArn sets the TableArn field's value. -func (s *BackupSummary) SetTableArn(v string) *BackupSummary { - s.TableArn = &v - return s -} - -// SetTableId sets the TableId field's value. -func (s *BackupSummary) SetTableId(v string) *BackupSummary { - s.TableId = &v - return s -} - -// SetTableName sets the TableName field's value. -func (s *BackupSummary) SetTableName(v string) *BackupSummary { - s.TableName = &v - return s -} - -type BatchExecuteStatementInput struct { - _ struct{} `type:"structure"` - - // Determines the level of detail about either provisioned or on-demand throughput - // consumption that is returned in the response: - // - // * INDEXES - The response includes the aggregate ConsumedCapacity for the - // operation, together with ConsumedCapacity for each table and secondary - // index that was accessed. Note that some operations, such as GetItem and - // BatchGetItem, do not access any indexes at all. In these cases, specifying - // INDEXES will only return ConsumedCapacity information for table(s). - // - // * TOTAL - The response includes only the aggregate ConsumedCapacity for - // the operation. - // - // * NONE - No ConsumedCapacity details are included in the response. - ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` - - // The list of PartiQL statements representing the batch to run. - // - // Statements is a required field - Statements []*BatchStatementRequest `min:"1" type:"list" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchExecuteStatementInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
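BatchExecuteStatementInput pairs with the BatchStatementRequest type defined further down; responses come back in request order, as the output documentation notes. A hedged sketch of a two-statement PartiQL batch against an illustrative Music table keyed on Artist and SongTitle:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	stmt := `SELECT * FROM "Music" WHERE "Artist" = ? AND "SongTitle" = ?`
	input := (&dynamodb.BatchExecuteStatementInput{}).
		SetReturnConsumedCapacity(dynamodb.ReturnConsumedCapacityTotal).
		SetStatements([]*dynamodb.BatchStatementRequest{
			(&dynamodb.BatchStatementRequest{}).SetStatement(stmt).
				SetParameters([]*dynamodb.AttributeValue{
					{S: aws.String("Acme Band")}, {S: aws.String("Happy Day")},
				}),
			(&dynamodb.BatchStatementRequest{}).SetStatement(stmt).
				SetParameters([]*dynamodb.AttributeValue{
					{S: aws.String("No One You Know")}, {S: aws.String("Call Me Today")},
				}),
		})

	out, err := svc.BatchExecuteStatement(input)
	if err != nil {
		fmt.Println("BatchExecuteStatement failed:", err)
		return
	}
	for i, resp := range out.Responses {
		// Per-statement failures arrive as BatchStatementError values,
		// not as a transport-level error.
		if resp.Error != nil {
			fmt.Printf("statement %d failed: %s\n", i, aws.StringValue(resp.Error.Message))
			continue
		}
		fmt.Printf("statement %d item: %v\n", i, resp.Item)
	}
}
```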
-func (s BatchExecuteStatementInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *BatchExecuteStatementInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchExecuteStatementInput"} - if s.Statements == nil { - invalidParams.Add(request.NewErrParamRequired("Statements")) - } - if s.Statements != nil && len(s.Statements) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Statements", 1)) - } - if s.Statements != nil { - for i, v := range s.Statements { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Statements", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. -func (s *BatchExecuteStatementInput) SetReturnConsumedCapacity(v string) *BatchExecuteStatementInput { - s.ReturnConsumedCapacity = &v - return s -} - -// SetStatements sets the Statements field's value. -func (s *BatchExecuteStatementInput) SetStatements(v []*BatchStatementRequest) *BatchExecuteStatementInput { - s.Statements = v - return s -} - -type BatchExecuteStatementOutput struct { - _ struct{} `type:"structure"` - - // The capacity units consumed by the entire operation. The values of the list - // are ordered according to the ordering of the statements. - ConsumedCapacity []*ConsumedCapacity `type:"list"` - - // The response to each PartiQL statement in the batch. The values of the list - // are ordered according to the ordering of the request statements. - Responses []*BatchStatementResponse `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchExecuteStatementOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchExecuteStatementOutput) GoString() string { - return s.String() -} - -// SetConsumedCapacity sets the ConsumedCapacity field's value. -func (s *BatchExecuteStatementOutput) SetConsumedCapacity(v []*ConsumedCapacity) *BatchExecuteStatementOutput { - s.ConsumedCapacity = v - return s -} - -// SetResponses sets the Responses field's value. -func (s *BatchExecuteStatementOutput) SetResponses(v []*BatchStatementResponse) *BatchExecuteStatementOutput { - s.Responses = v - return s -} - -// Represents the input of a BatchGetItem operation. -type BatchGetItemInput struct { - _ struct{} `type:"structure"` - - // A map of one or more table names or table ARNs and, for each table, a map - // that describes one or more items to retrieve from that table. Each table - // name or ARN can be used only once per BatchGetItem request. - // - // Each element in the map of items to retrieve consists of the following: - // - // * ConsistentRead - If true, a strongly consistent read is used; if false - // (the default), an eventually consistent read is used. - // - // * ExpressionAttributeNames - One or more substitution tokens for attribute - // names in the ProjectionExpression parameter. 
The following are some use - // cases for using ExpressionAttributeNames: To access an attribute whose - // name conflicts with a DynamoDB reserved word. To create a placeholder - // for repeating occurrences of an attribute name in an expression. To prevent - // special characters in an attribute name from being misinterpreted in an - // expression. Use the # character in an expression to dereference an attribute - // name. For example, consider the following attribute name: Percentile The - // name of this attribute conflicts with a reserved word, so it cannot be - // used directly in an expression. (For the complete list of reserved words, - // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) - // in the Amazon DynamoDB Developer Guide). To work around this, you could - // specify the following for ExpressionAttributeNames: {"#P":"Percentile"} - // You could then use this substitution in an expression, as in this example: - // #P = :val Tokens that begin with the : character are expression attribute - // values, which are placeholders for the actual value at runtime. For more - // information about expression attribute names, see Accessing Item Attributes - // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. - // - // * Keys - An array of primary key attribute values that define specific - // items in the table. For each primary key, you must provide all of the - // key attributes. For example, with a simple primary key, you only need - // to provide the partition key value. For a composite key, you must provide - // both the partition key value and the sort key value. - // - // * ProjectionExpression - A string that identifies one or more attributes - // to retrieve from the table. These attributes can include scalars, sets, - // or elements of a JSON document. The attributes in the expression must - // be separated by commas. If no attribute names are specified, then all - // attributes are returned. If any of the requested attributes are not found, - // they do not appear in the result. For more information, see Accessing - // Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. - // - // * AttributesToGet - This is a legacy parameter. Use ProjectionExpression - // instead. For more information, see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html) - // in the Amazon DynamoDB Developer Guide. - // - // RequestItems is a required field - RequestItems map[string]*KeysAndAttributes `min:"1" type:"map" required:"true"` - - // Determines the level of detail about either provisioned or on-demand throughput - // consumption that is returned in the response: - // - // * INDEXES - The response includes the aggregate ConsumedCapacity for the - // operation, together with ConsumedCapacity for each table and secondary - // index that was accessed. Note that some operations, such as GetItem and - // BatchGetItem, do not access any indexes at all. In these cases, specifying - // INDEXES will only return ConsumedCapacity information for table(s). - // - // * TOTAL - The response includes only the aggregate ConsumedCapacity for - // the operation. - // - // * NONE - No ConsumedCapacity details are included in the response. 
- ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchGetItemInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchGetItemInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *BatchGetItemInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchGetItemInput"} - if s.RequestItems == nil { - invalidParams.Add(request.NewErrParamRequired("RequestItems")) - } - if s.RequestItems != nil && len(s.RequestItems) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RequestItems", 1)) - } - if s.RequestItems != nil { - for i, v := range s.RequestItems { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RequestItems", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetRequestItems sets the RequestItems field's value. -func (s *BatchGetItemInput) SetRequestItems(v map[string]*KeysAndAttributes) *BatchGetItemInput { - s.RequestItems = v - return s -} - -// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. -func (s *BatchGetItemInput) SetReturnConsumedCapacity(v string) *BatchGetItemInput { - s.ReturnConsumedCapacity = &v - return s -} - -// Represents the output of a BatchGetItem operation. -type BatchGetItemOutput struct { - _ struct{} `type:"structure"` - - // The read capacity units consumed by the entire BatchGetItem operation. - // - // Each element consists of: - // - // * TableName - The table that consumed the provisioned throughput. - // - // * CapacityUnits - The total number of capacity units consumed. - ConsumedCapacity []*ConsumedCapacity `type:"list"` - - // A map of table name or table ARN to a list of items. Each object in Responses - // consists of a table name or ARN, along with a map of attribute data consisting - // of the data type and attribute value. - Responses map[string][]map[string]*AttributeValue `type:"map"` - - // A map of tables and their respective keys that were not processed with the - // current response. The UnprocessedKeys value is in the same form as RequestItems, - // so the value can be provided directly to a subsequent BatchGetItem operation. - // For more information, see RequestItems in the Request Parameters section. - // - // Each element consists of: - // - // * Keys - An array of primary key attribute values that define specific - // items in the table. - // - // * ProjectionExpression - One or more attributes to be retrieved from the - // table or index. By default, all attributes are returned. If a requested - // attribute is not found, it does not appear in the result. - // - // * ConsistentRead - The consistency of a read operation. If set to true, - // then a strongly consistent read is used; otherwise, an eventually consistent - // read is used. 
- // - // If there are no unprocessed keys remaining, the response contains an empty - // UnprocessedKeys map. - UnprocessedKeys map[string]*KeysAndAttributes `min:"1" type:"map"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchGetItemOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchGetItemOutput) GoString() string { - return s.String() -} - -// SetConsumedCapacity sets the ConsumedCapacity field's value. -func (s *BatchGetItemOutput) SetConsumedCapacity(v []*ConsumedCapacity) *BatchGetItemOutput { - s.ConsumedCapacity = v - return s -} - -// SetResponses sets the Responses field's value. -func (s *BatchGetItemOutput) SetResponses(v map[string][]map[string]*AttributeValue) *BatchGetItemOutput { - s.Responses = v - return s -} - -// SetUnprocessedKeys sets the UnprocessedKeys field's value. -func (s *BatchGetItemOutput) SetUnprocessedKeys(v map[string]*KeysAndAttributes) *BatchGetItemOutput { - s.UnprocessedKeys = v - return s -} - -// An error associated with a statement in a PartiQL batch that was run. -type BatchStatementError struct { - _ struct{} `type:"structure"` - - // The error code associated with the failed PartiQL batch statement. - Code *string `type:"string" enum:"BatchStatementErrorCodeEnum"` - - // The item which caused the condition check to fail. This will be set if ReturnValuesOnConditionCheckFailure - // is specified as ALL_OLD. - Item map[string]*AttributeValue `type:"map"` - - // The error message associated with the PartiQL batch response. - Message *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchStatementError) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchStatementError) GoString() string { - return s.String() -} - -// SetCode sets the Code field's value. -func (s *BatchStatementError) SetCode(v string) *BatchStatementError { - s.Code = &v - return s -} - -// SetItem sets the Item field's value. -func (s *BatchStatementError) SetItem(v map[string]*AttributeValue) *BatchStatementError { - s.Item = v - return s -} - -// SetMessage sets the Message field's value. -func (s *BatchStatementError) SetMessage(v string) *BatchStatementError { - s.Message = &v - return s -} - -// A PartiQL batch statement request. -type BatchStatementRequest struct { - _ struct{} `type:"structure"` - - // The read consistency of the PartiQL batch request. - ConsistentRead *bool `type:"boolean"` - - // The parameters associated with a PartiQL statement in the batch request. 
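Since UnprocessedKeys has the same shape as RequestItems, the resubmission pattern the documentation above describes is a simple loop. A minimal sketch, again with an illustrative Music table (production code would add backoff between rounds):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	req := map[string]*dynamodb.KeysAndAttributes{
		"Music": {
			Keys: []map[string]*dynamodb.AttributeValue{
				{"Artist": {S: aws.String("Acme Band")}, "SongTitle": {S: aws.String("Happy Day")}},
			},
			ProjectionExpression: aws.String("Artist, SongTitle, AlbumTitle"),
		},
	}

	for len(req) > 0 {
		out, err := svc.BatchGetItem(&dynamodb.BatchGetItemInput{RequestItems: req})
		if err != nil {
			fmt.Println("BatchGetItem failed:", err)
			return
		}
		for table, items := range out.Responses {
			fmt.Printf("%s: %d item(s)\n", table, len(items))
		}
		// Empty when everything was processed; otherwise feed it straight back.
		req = out.UnprocessedKeys
	}
}
```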
- Parameters []*AttributeValue `min:"1" type:"list"` - - // An optional parameter that returns the item attributes for a PartiQL batch - // request operation that failed a condition check. - // - // There is no additional cost associated with requesting a return value aside - // from the small network and processing overhead of receiving a larger response. - // No read capacity units are consumed. - ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"` - - // A valid PartiQL statement. - // - // Statement is a required field - Statement *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchStatementRequest) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchStatementRequest) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *BatchStatementRequest) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchStatementRequest"} - if s.Parameters != nil && len(s.Parameters) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Parameters", 1)) - } - if s.Statement == nil { - invalidParams.Add(request.NewErrParamRequired("Statement")) - } - if s.Statement != nil && len(*s.Statement) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Statement", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetConsistentRead sets the ConsistentRead field's value. -func (s *BatchStatementRequest) SetConsistentRead(v bool) *BatchStatementRequest { - s.ConsistentRead = &v - return s -} - -// SetParameters sets the Parameters field's value. -func (s *BatchStatementRequest) SetParameters(v []*AttributeValue) *BatchStatementRequest { - s.Parameters = v - return s -} - -// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. -func (s *BatchStatementRequest) SetReturnValuesOnConditionCheckFailure(v string) *BatchStatementRequest { - s.ReturnValuesOnConditionCheckFailure = &v - return s -} - -// SetStatement sets the Statement field's value. -func (s *BatchStatementRequest) SetStatement(v string) *BatchStatementRequest { - s.Statement = &v - return s -} - -// A PartiQL batch statement response. -type BatchStatementResponse struct { - _ struct{} `type:"structure"` - - // The error associated with a failed PartiQL batch statement. - Error *BatchStatementError `type:"structure"` - - // A DynamoDB item associated with a BatchStatementResponse. - Item map[string]*AttributeValue `type:"map"` - - // The table name associated with a failed PartiQL batch statement. - TableName *string `min:"3" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive".
-func (s BatchStatementResponse) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchStatementResponse) GoString() string { - return s.String() -} - -// SetError sets the Error field's value. -func (s *BatchStatementResponse) SetError(v *BatchStatementError) *BatchStatementResponse { - s.Error = v - return s -} - -// SetItem sets the Item field's value. -func (s *BatchStatementResponse) SetItem(v map[string]*AttributeValue) *BatchStatementResponse { - s.Item = v - return s -} - -// SetTableName sets the TableName field's value. -func (s *BatchStatementResponse) SetTableName(v string) *BatchStatementResponse { - s.TableName = &v - return s -} - -// Represents the input of a BatchWriteItem operation. -type BatchWriteItemInput struct { - _ struct{} `type:"structure"` - - // A map of one or more table names or table ARNs and, for each table, a list - // of operations to be performed (DeleteRequest or PutRequest). Each element - // in the map consists of the following: - // - // * DeleteRequest - Perform a DeleteItem operation on the specified item. - // The item to be deleted is identified by a Key subelement: Key - A map - // of primary key attribute values that uniquely identify the item. Each - // entry in this map consists of an attribute name and an attribute value. - // For each primary key, you must provide all of the key attributes. For - // example, with a simple primary key, you only need to provide a value for - // the partition key. For a composite primary key, you must provide values - // for both the partition key and the sort key. - // - // * PutRequest - Perform a PutItem operation on the specified item. The - // item to be put is identified by an Item subelement: Item - A map of attributes - // and their values. Each entry in this map consists of an attribute name - // and an attribute value. Attribute values must not be null; string and - // binary type attributes must have lengths greater than zero; and set type - // attributes must not be empty. Requests that contain empty values are rejected - // with a ValidationException exception. If you specify any attributes that - // are part of an index key, then the data types for those attributes must - // match those of the schema in the table's attribute definition. - // - // RequestItems is a required field - RequestItems map[string][]*WriteRequest `min:"1" type:"map" required:"true"` - - // Determines the level of detail about either provisioned or on-demand throughput - // consumption that is returned in the response: - // - // * INDEXES - The response includes the aggregate ConsumedCapacity for the - // operation, together with ConsumedCapacity for each table and secondary - // index that was accessed. Note that some operations, such as GetItem and - // BatchGetItem, do not access any indexes at all. In these cases, specifying - // INDEXES will only return ConsumedCapacity information for table(s). - // - // * TOTAL - The response includes only the aggregate ConsumedCapacity for - // the operation. - // - // * NONE - No ConsumedCapacity details are included in the response. - ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` - - // Determines whether item collection metrics are returned. 
If set to SIZE, - // the response includes statistics about item collections, if any, that were - // modified during the operation. If set to NONE (the default), no statistics - // are returned. - ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchWriteItemInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchWriteItemInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *BatchWriteItemInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchWriteItemInput"} - if s.RequestItems == nil { - invalidParams.Add(request.NewErrParamRequired("RequestItems")) - } - if s.RequestItems != nil && len(s.RequestItems) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RequestItems", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetRequestItems sets the RequestItems field's value. -func (s *BatchWriteItemInput) SetRequestItems(v map[string][]*WriteRequest) *BatchWriteItemInput { - s.RequestItems = v - return s -} - -// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. -func (s *BatchWriteItemInput) SetReturnConsumedCapacity(v string) *BatchWriteItemInput { - s.ReturnConsumedCapacity = &v - return s -} - -// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value. -func (s *BatchWriteItemInput) SetReturnItemCollectionMetrics(v string) *BatchWriteItemInput { - s.ReturnItemCollectionMetrics = &v - return s -} - -// Represents the output of a BatchWriteItem operation. -type BatchWriteItemOutput struct { - _ struct{} `type:"structure"` - - // The capacity units consumed by the entire BatchWriteItem operation. - // - // Each element consists of: - // - // * TableName - The table that consumed the provisioned throughput. - // - // * CapacityUnits - The total number of capacity units consumed. - ConsumedCapacity []*ConsumedCapacity `type:"list"` - - // A list of tables that were processed by BatchWriteItem and, for each table, - // information about any item collections that were affected by individual DeleteItem - // or PutItem operations. - // - // Each entry consists of the following subelements: - // - // * ItemCollectionKey - The partition key value of the item collection. - // This is the same as the partition key value of the item. - // - // * SizeEstimateRangeGB - An estimate of item collection size, expressed - // in GB. This is a two-element array containing a lower bound and an upper - // bound for the estimate. The estimate includes the size of all the items - // in the table, plus the size of all attributes projected into all of the - // local secondary indexes on the table. Use this estimate to measure whether - // a local secondary index is approaching its size limit. The estimate is - // subject to change over time; therefore, do not rely on the precision or - // accuracy of the estimate.
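BatchWriteItemOutput's UnprocessedItems (described just below) mirrors RequestItems the same way, so partial failures drain with the same loop as BatchGetItem. A sketch with a single illustrative put:

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	writes := map[string][]*dynamodb.WriteRequest{
		"Music": {{
			PutRequest: &dynamodb.PutRequest{Item: map[string]*dynamodb.AttributeValue{
				"Artist":    {S: aws.String("Acme Band")},
				"SongTitle": {S: aws.String("Happy Day")},
			}},
		}},
	}

	for len(writes) > 0 {
		out, err := svc.BatchWriteItem(&dynamodb.BatchWriteItemInput{RequestItems: writes})
		if err != nil {
			fmt.Println("BatchWriteItem failed:", err)
			return
		}
		// Resubmit whatever was throttled; a fixed sleep stands in for the
		// exponential backoff real code should use.
		writes = out.UnprocessedItems
		if len(writes) > 0 {
			time.Sleep(500 * time.Millisecond)
		}
	}
}
```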
- ItemCollectionMetrics map[string][]*ItemCollectionMetrics `type:"map"` - - // A map of tables and requests against those tables that were not processed. - // The UnprocessedItems value is in the same form as RequestItems, so you can - // provide this value directly to a subsequent BatchWriteItem operation. For - // more information, see RequestItems in the Request Parameters section. - // - // Each UnprocessedItems entry consists of a table name or table ARN and, for - // that table, a list of operations to perform (DeleteRequest or PutRequest). - // - // * DeleteRequest - Perform a DeleteItem operation on the specified item. - // The item to be deleted is identified by a Key subelement: Key - A map - // of primary key attribute values that uniquely identify the item. Each - // entry in this map consists of an attribute name and an attribute value. - // - // * PutRequest - Perform a PutItem operation on the specified item. The - // item to be put is identified by an Item subelement: Item - A map of attributes - // and their values. Each entry in this map consists of an attribute name - // and an attribute value. Attribute values must not be null; string and - // binary type attributes must have lengths greater than zero; and set type - // attributes must not be empty. Requests that contain empty values will - // be rejected with a ValidationException exception. If you specify any attributes - // that are part of an index key, then the data types for those attributes - // must match those of the schema in the table's attribute definition. - // - // If there are no unprocessed items remaining, the response contains an empty - // UnprocessedItems map. - UnprocessedItems map[string][]*WriteRequest `min:"1" type:"map"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchWriteItemOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchWriteItemOutput) GoString() string { - return s.String() -} - -// SetConsumedCapacity sets the ConsumedCapacity field's value. -func (s *BatchWriteItemOutput) SetConsumedCapacity(v []*ConsumedCapacity) *BatchWriteItemOutput { - s.ConsumedCapacity = v - return s -} - -// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value. -func (s *BatchWriteItemOutput) SetItemCollectionMetrics(v map[string][]*ItemCollectionMetrics) *BatchWriteItemOutput { - s.ItemCollectionMetrics = v - return s -} - -// SetUnprocessedItems sets the UnprocessedItems field's value. -func (s *BatchWriteItemOutput) SetUnprocessedItems(v map[string][]*WriteRequest) *BatchWriteItemOutput { - s.UnprocessedItems = v - return s -} - -// Contains the details for the read/write capacity mode. This page talks about -// PROVISIONED and PAY_PER_REQUEST billing modes. For more information about -// these modes, see Read/write capacity mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html). -// -// You may need to switch to on-demand mode at least once in order to return -// a BillingModeSummary response. 
-type BillingModeSummary struct { - _ struct{} `type:"structure"` - - // Controls how you are charged for read and write throughput and how you manage - // capacity. This setting can be changed later. - // - // * PROVISIONED - Sets the read/write capacity mode to PROVISIONED. We recommend - // using PROVISIONED for predictable workloads. - // - // * PAY_PER_REQUEST - Sets the read/write capacity mode to PAY_PER_REQUEST. - // We recommend using PAY_PER_REQUEST for unpredictable workloads. - BillingMode *string `type:"string" enum:"BillingMode"` - - // Represents the time when PAY_PER_REQUEST was last set as the read/write capacity - // mode. - LastUpdateToPayPerRequestDateTime *time.Time `type:"timestamp"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BillingModeSummary) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BillingModeSummary) GoString() string { - return s.String() -} - -// SetBillingMode sets the BillingMode field's value. -func (s *BillingModeSummary) SetBillingMode(v string) *BillingModeSummary { - s.BillingMode = &v - return s -} - -// SetLastUpdateToPayPerRequestDateTime sets the LastUpdateToPayPerRequestDateTime field's value. -func (s *BillingModeSummary) SetLastUpdateToPayPerRequestDateTime(v time.Time) *BillingModeSummary { - s.LastUpdateToPayPerRequestDateTime = &v - return s -} - -// An ordered list of errors for each item in the request which caused the transaction -// to get cancelled. The values of the list are ordered according to the ordering -// of the TransactWriteItems request parameter. If no error occurred for the -// associated item an error with a Null code and Null message will be present. -type CancellationReason struct { - _ struct{} `type:"structure"` - - // Status code for the result of the cancelled transaction. - Code *string `type:"string"` - - // Item in the request which caused the transaction to get cancelled. - Item map[string]*AttributeValue `type:"map"` - - // Cancellation reason message description. - Message *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CancellationReason) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CancellationReason) GoString() string { - return s.String() -} - -// SetCode sets the Code field's value. -func (s *CancellationReason) SetCode(v string) *CancellationReason { - s.Code = &v - return s -} - -// SetItem sets the Item field's value. -func (s *CancellationReason) SetItem(v map[string]*AttributeValue) *CancellationReason { - s.Item = v - return s -} - -// SetMessage sets the Message field's value. 
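CancellationReason values surface on TransactionCanceledException, ordered like the original TransactItems, with a null code for items that did not fail. A hedged sketch that inspects them after a conditional transact-write; the table, item, and condition are illustrative:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	_, err := svc.TransactWriteItems(&dynamodb.TransactWriteItemsInput{
		TransactItems: []*dynamodb.TransactWriteItem{{
			Put: &dynamodb.Put{
				TableName: aws.String("Music"),
				Item: map[string]*dynamodb.AttributeValue{
					"Artist":    {S: aws.String("Acme Band")},
					"SongTitle": {S: aws.String("Happy Day")},
				},
				ConditionExpression: aws.String("attribute_not_exists(Artist)"),
			},
		}},
	})

	var tce *dynamodb.TransactionCanceledException
	if errors.As(err, &tce) {
		// One reason per request item, in TransactItems order.
		for i, r := range tce.CancellationReasons {
			fmt.Printf("item %d: code=%s message=%s\n", i,
				aws.StringValue(r.Code), aws.StringValue(r.Message))
		}
	}
}
```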
-func (s *CancellationReason) SetMessage(v string) *CancellationReason { - s.Message = &v - return s -} - -// Represents the amount of provisioned throughput capacity consumed on a table -// or an index. -type Capacity struct { - _ struct{} `type:"structure"` - - // The total number of capacity units consumed on a table or an index. - CapacityUnits *float64 `type:"double"` - - // The total number of read capacity units consumed on a table or an index. - ReadCapacityUnits *float64 `type:"double"` - - // The total number of write capacity units consumed on a table or an index. - WriteCapacityUnits *float64 `type:"double"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Capacity) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Capacity) GoString() string { - return s.String() -} - -// SetCapacityUnits sets the CapacityUnits field's value. -func (s *Capacity) SetCapacityUnits(v float64) *Capacity { - s.CapacityUnits = &v - return s -} - -// SetReadCapacityUnits sets the ReadCapacityUnits field's value. -func (s *Capacity) SetReadCapacityUnits(v float64) *Capacity { - s.ReadCapacityUnits = &v - return s -} - -// SetWriteCapacityUnits sets the WriteCapacityUnits field's value. -func (s *Capacity) SetWriteCapacityUnits(v float64) *Capacity { - s.WriteCapacityUnits = &v - return s -} - -// Represents the selection criteria for a Query or Scan operation: -// -// - For a Query operation, Condition is used for specifying the KeyConditions -// to use when querying a table or an index. For KeyConditions, only the -// following comparison operators are supported: EQ | LE | LT | GE | GT | -// BEGINS_WITH | BETWEEN Condition is also used in a QueryFilter, which evaluates -// the query results and returns only the desired values. -// -// - For a Scan operation, Condition is used in a ScanFilter, which evaluates -// the scan results and returns only the desired values. -type Condition struct { - _ struct{} `type:"structure"` - - // One or more values to evaluate against the supplied attribute. The number - // of values in the list depends on the ComparisonOperator being used. - // - // For type Number, value comparisons are numeric. - // - // String value comparisons for greater than, equals, or less than are based - // on ASCII character code values. For example, a is greater than A, and a is - // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters - // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). - // - // For Binary, DynamoDB treats each byte of the binary data as unsigned when - // it compares binary values. - AttributeValueList []*AttributeValue `type:"list"` - - // A comparator for evaluating attributes. For example, equals, greater than, - // less than, etc. - // - // The following comparison operators are available: - // - // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | - // BEGINS_WITH | IN | BETWEEN - // - // The following are descriptions of each comparison operator. - // - // * EQ : Equal. 
EQ is supported for all data types, including lists and - // maps. AttributeValueList can contain only one AttributeValue element of - // type String, Number, Binary, String Set, Number Set, or Binary Set. If - // an item contains an AttributeValue element of a different type than the - // one provided in the request, the value does not match. For example, {"S":"6"} - // does not equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", - // "1"]}. - // - // * NE : Not equal. NE is supported for all data types, including lists - // and maps. AttributeValueList can contain only one AttributeValue of type - // String, Number, Binary, String Set, Number Set, or Binary Set. If an item - // contains an AttributeValue of a different type than the one provided in - // the request, the value does not match. For example, {"S":"6"} does not - // equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. - // - // * LE : Less than or equal. AttributeValueList can contain only one AttributeValue - // element of type String, Number, or Binary (not a set type). If an item - // contains an AttributeValue element of a different type than the one provided - // in the request, the value does not match. For example, {"S":"6"} does - // not equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", - // "1"]}. - // - // * LT : Less than. AttributeValueList can contain only one AttributeValue - // of type String, Number, or Binary (not a set type). If an item contains - // an AttributeValue element of a different type than the one provided in - // the request, the value does not match. For example, {"S":"6"} does not - // equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", - // "1"]}. - // - // * GE : Greater than or equal. AttributeValueList can contain only one - // AttributeValue element of type String, Number, or Binary (not a set type). - // If an item contains an AttributeValue element of a different type than - // the one provided in the request, the value does not match. For example, - // {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} does not compare to - // {"NS":["6", "2", "1"]}. - // - // * GT : Greater than. AttributeValueList can contain only one AttributeValue - // element of type String, Number, or Binary (not a set type). If an item - // contains an AttributeValue element of a different type than the one provided - // in the request, the value does not match. For example, {"S":"6"} does - // not equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", - // "1"]}. - // - // * NOT_NULL : The attribute exists. NOT_NULL is supported for all data - // types, including lists and maps. This operator tests for the existence - // of an attribute, not its data type. If the data type of attribute "a" - // is null, and you evaluate it using NOT_NULL, the result is a Boolean true. - // This result is because the attribute "a" exists; its data type is not - // relevant to the NOT_NULL comparison operator. - // - // * NULL : The attribute does not exist. NULL is supported for all data - // types, including lists and maps. This operator tests for the nonexistence - // of an attribute, not its data type. If the data type of attribute "a" - // is null, and you evaluate it using NULL, the result is a Boolean false. - // This is because the attribute "a" exists; its data type is not relevant - // to the NULL comparison operator. - // - // * CONTAINS : Checks for a subsequence, or value in a set. 
AttributeValueList - // can contain only one AttributeValue element of type String, Number, or - // Binary (not a set type). If the target attribute of the comparison is - // of type String, then the operator checks for a substring match. If the - // target attribute of the comparison is of type Binary, then the operator - // looks for a subsequence of the target that matches the input. If the target - // attribute of the comparison is a set ("SS", "NS", or "BS"), then the operator - // evaluates to true if it finds an exact match with any member of the set. - // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can - // be a list; however, "b" cannot be a set, a map, or a list. - // - // * NOT_CONTAINS : Checks for absence of a subsequence, or absence of a - // value in a set. AttributeValueList can contain only one AttributeValue - // element of type String, Number, or Binary (not a set type). If the target - // attribute of the comparison is a String, then the operator checks for - // the absence of a substring match. If the target attribute of the comparison - // is Binary, then the operator checks for the absence of a subsequence of - // the target that matches the input. If the target attribute of the comparison - // is a set ("SS", "NS", or "BS"), then the operator evaluates to true if - // it does not find an exact match with any member of the set. NOT_CONTAINS - // is supported for lists: When evaluating "a NOT CONTAINS b", "a" can be - // a list; however, "b" cannot be a set, a map, or a list. - // - // * BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only - // one AttributeValue of type String or Binary (not a Number or a set type). - // The target attribute of the comparison must be of type String or Binary - // (not a Number or a set type). - // - // * IN : Checks for matching elements in a list. AttributeValueList can - // contain one or more AttributeValue elements of type String, Number, or - // Binary. These attributes are compared against an existing attribute of - // an item. If any elements of the input are equal to the item attribute, - // the expression evaluates to true. - // - // * BETWEEN : Greater than or equal to the first value, and less than or - // equal to the second value. AttributeValueList must contain two AttributeValue - // elements of the same type, either String, Number, or Binary (not a set - // type). A target attribute matches if the target value is greater than, - // or equal to, the first element and less than, or equal to, the second - // element. If an item contains an AttributeValue element of a different - // type than the one provided in the request, the value does not match. For - // example, {"S":"6"} does not compare to {"N":"6"}. Also, {"N":"6"} does - // not compare to {"NS":["6", "2", "1"]} - // - // For usage examples of AttributeValueList and ComparisonOperator, see Legacy - // Conditional Parameters (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html) - // in the Amazon DynamoDB Developer Guide. - // - // ComparisonOperator is a required field - ComparisonOperator *string `type:"string" required:"true" enum:"ComparisonOperator"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
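As the Condition docs above say, this type feeds the legacy KeyConditions parameter of Query (new code would normally use KeyConditionExpression, but the shape is the same). A sketch of an EQ-plus-BETWEEN key condition; the Orders table and its attributes are invented for illustration:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// queryRange uses the legacy KeyConditions form: EQ on the partition key,
// BETWEEN on the sort key, exactly the operator subset listed above.
func queryRange(svc *dynamodb.DynamoDB) (*dynamodb.QueryOutput, error) {
	return svc.Query(&dynamodb.QueryInput{
		TableName: aws.String("Orders"),
		KeyConditions: map[string]*dynamodb.Condition{
			"CustomerId": {
				ComparisonOperator: aws.String(dynamodb.ComparisonOperatorEq),
				AttributeValueList: []*dynamodb.AttributeValue{{S: aws.String("c-123")}},
			},
			"OrderDate": {
				ComparisonOperator: aws.String(dynamodb.ComparisonOperatorBetween),
				AttributeValueList: []*dynamodb.AttributeValue{
					{S: aws.String("2024-01-01")},
					{S: aws.String("2024-12-31")},
				},
			},
		},
	})
}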
-func (s Condition) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Condition) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Condition) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Condition"} - if s.ComparisonOperator == nil { - invalidParams.Add(request.NewErrParamRequired("ComparisonOperator")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAttributeValueList sets the AttributeValueList field's value. -func (s *Condition) SetAttributeValueList(v []*AttributeValue) *Condition { - s.AttributeValueList = v - return s -} - -// SetComparisonOperator sets the ComparisonOperator field's value. -func (s *Condition) SetComparisonOperator(v string) *Condition { - s.ComparisonOperator = &v - return s -} - -// Represents a request to perform a check that an item exists or to check the -// condition of specific attributes of the item. -type ConditionCheck struct { - _ struct{} `type:"structure"` - - // A condition that must be satisfied in order for a conditional update to succeed. - // For more information, see Condition expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ConditionExpressions.html) - // in the Amazon DynamoDB Developer Guide. - // - // ConditionExpression is a required field - ConditionExpression *string `type:"string" required:"true"` - - // One or more substitution tokens for attribute names in an expression. For - // more information, see Expression attribute names (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ExpressionAttributeNames.html) - // in the Amazon DynamoDB Developer Guide. - ExpressionAttributeNames map[string]*string `type:"map"` - - // One or more values that can be substituted in an expression. For more information, - // see Condition expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ConditionExpressions.html) - // in the Amazon DynamoDB Developer Guide. - ExpressionAttributeValues map[string]*AttributeValue `type:"map"` - - // The primary key of the item to be checked. Each element consists of an attribute - // name and a value for that attribute. - // - // Key is a required field - Key map[string]*AttributeValue `type:"map" required:"true"` - - // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the - // ConditionCheck condition fails. For ReturnValuesOnConditionCheckFailure, - // the valid values are: NONE and ALL_OLD. - ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"` - - // Name of the table for the check item request. You can also provide the Amazon - // Resource Name (ARN) of the table in this parameter. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s ConditionCheck) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ConditionCheck) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ConditionCheck) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ConditionCheck"} - if s.ConditionExpression == nil { - invalidParams.Add(request.NewErrParamRequired("ConditionExpression")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetConditionExpression sets the ConditionExpression field's value. -func (s *ConditionCheck) SetConditionExpression(v string) *ConditionCheck { - s.ConditionExpression = &v - return s -} - -// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. -func (s *ConditionCheck) SetExpressionAttributeNames(v map[string]*string) *ConditionCheck { - s.ExpressionAttributeNames = v - return s -} - -// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. -func (s *ConditionCheck) SetExpressionAttributeValues(v map[string]*AttributeValue) *ConditionCheck { - s.ExpressionAttributeValues = v - return s -} - -// SetKey sets the Key field's value. -func (s *ConditionCheck) SetKey(v map[string]*AttributeValue) *ConditionCheck { - s.Key = v - return s -} - -// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. -func (s *ConditionCheck) SetReturnValuesOnConditionCheckFailure(v string) *ConditionCheck { - s.ReturnValuesOnConditionCheckFailure = &v - return s -} - -// SetTableName sets the TableName field's value. -func (s *ConditionCheck) SetTableName(v string) *ConditionCheck { - s.TableName = &v - return s -} - -// A condition specified in the operation could not be evaluated. -type ConditionalCheckFailedException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - // Item which caused the ConditionalCheckFailedException. - Item map[string]*AttributeValue `type:"map"` - - // The conditional request failed. - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ConditionalCheckFailedException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
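ConditionCheck is only meaningful inside TransactWriteItems, and the CancellationReason type documented earlier is how failures come back, one entry per TransactItems element. A sketch; the Accounts/Orders tables, keys, and condition are invented:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// putIfAccountActive writes an order only if the owning account item exists
// and is marked active. On cancellation, CancellationReasons is ordered to
// match TransactItems, as described above.
func putIfAccountActive(svc *dynamodb.DynamoDB) error {
	_, err := svc.TransactWriteItems(&dynamodb.TransactWriteItemsInput{
		TransactItems: []*dynamodb.TransactWriteItem{
			{
				ConditionCheck: &dynamodb.ConditionCheck{
					TableName:           aws.String("Accounts"),
					Key:                 map[string]*dynamodb.AttributeValue{"AccountId": {S: aws.String("a-1")}},
					ConditionExpression: aws.String("attribute_exists(AccountId) AND Active = :t"),
					ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
						":t": {BOOL: aws.Bool(true)},
					},
				},
			},
			{
				Put: &dynamodb.Put{
					TableName: aws.String("Orders"),
					Item:      map[string]*dynamodb.AttributeValue{"OrderId": {S: aws.String("o-42")}},
				},
			},
		},
	})
	if tce, ok := err.(*dynamodb.TransactionCanceledException); ok {
		for i, r := range tce.CancellationReasons {
			// Items that did not themselves fail carry a Null code and message.
			fmt.Printf("item %d: code=%s message=%s\n", i,
				aws.StringValue(r.Code), aws.StringValue(r.Message))
		}
	}
	return err
}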
-func (s ConditionalCheckFailedException) GoString() string { - return s.String() -} - -func newErrorConditionalCheckFailedException(v protocol.ResponseMetadata) error { - return &ConditionalCheckFailedException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *ConditionalCheckFailedException) Code() string { - return "ConditionalCheckFailedException" -} - -// Message returns the exception's message. -func (s *ConditionalCheckFailedException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *ConditionalCheckFailedException) OrigErr() error { - return nil -} - -func (s *ConditionalCheckFailedException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *ConditionalCheckFailedException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *ConditionalCheckFailedException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The capacity units consumed by an operation. The data returned includes the -// total provisioned throughput consumed, along with statistics for the table -// and any indexes involved in the operation. ConsumedCapacity is only returned -// if the request asked for it. For more information, see Provisioned capacity -// mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html) -// in the Amazon DynamoDB Developer Guide. -type ConsumedCapacity struct { - _ struct{} `type:"structure"` - - // The total number of capacity units consumed by the operation. - CapacityUnits *float64 `type:"double"` - - // The amount of throughput consumed on each global index affected by the operation. - GlobalSecondaryIndexes map[string]*Capacity `type:"map"` - - // The amount of throughput consumed on each local index affected by the operation. - LocalSecondaryIndexes map[string]*Capacity `type:"map"` - - // The total number of read capacity units consumed by the operation. - ReadCapacityUnits *float64 `type:"double"` - - // The amount of throughput consumed on the table affected by the operation. - Table *Capacity `type:"structure"` - - // The name of the table that was affected by the operation. If you had specified - // the Amazon Resource Name (ARN) of a table in the input, you'll see the table - // ARN in the response. - TableName *string `min:"1" type:"string"` - - // The total number of write capacity units consumed by the operation. - WriteCapacityUnits *float64 `type:"double"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ConsumedCapacity) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ConsumedCapacity) GoString() string { - return s.String() -} - -// SetCapacityUnits sets the CapacityUnits field's value. 
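The Code/Message/StatusCode accessors above are what make error branching possible without matching on strings. A sketch of the usual conditional-put pattern; the Users table and the attribute_not_exists condition are illustrative:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// createUser inserts a user only if the key is not already taken, and
// reports whether the item already existed instead of surfacing the
// ConditionalCheckFailedException as a hard error.
func createUser(svc *dynamodb.DynamoDB, id string) (alreadyExists bool, err error) {
	_, err = svc.PutItem(&dynamodb.PutItemInput{
		TableName:           aws.String("Users"),
		Item:                map[string]*dynamodb.AttributeValue{"UserId": {S: aws.String(id)}},
		ConditionExpression: aws.String("attribute_not_exists(UserId)"),
	})
	// Code() matches the generated ErrCode constant, so no string literals leak in.
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == dynamodb.ErrCodeConditionalCheckFailedException {
		return true, nil
	}
	return false, err
}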
-func (s *ConsumedCapacity) SetCapacityUnits(v float64) *ConsumedCapacity { - s.CapacityUnits = &v - return s -} - -// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. -func (s *ConsumedCapacity) SetGlobalSecondaryIndexes(v map[string]*Capacity) *ConsumedCapacity { - s.GlobalSecondaryIndexes = v - return s -} - -// SetLocalSecondaryIndexes sets the LocalSecondaryIndexes field's value. -func (s *ConsumedCapacity) SetLocalSecondaryIndexes(v map[string]*Capacity) *ConsumedCapacity { - s.LocalSecondaryIndexes = v - return s -} - -// SetReadCapacityUnits sets the ReadCapacityUnits field's value. -func (s *ConsumedCapacity) SetReadCapacityUnits(v float64) *ConsumedCapacity { - s.ReadCapacityUnits = &v - return s -} - -// SetTable sets the Table field's value. -func (s *ConsumedCapacity) SetTable(v *Capacity) *ConsumedCapacity { - s.Table = v - return s -} - -// SetTableName sets the TableName field's value. -func (s *ConsumedCapacity) SetTableName(v string) *ConsumedCapacity { - s.TableName = &v - return s -} - -// SetWriteCapacityUnits sets the WriteCapacityUnits field's value. -func (s *ConsumedCapacity) SetWriteCapacityUnits(v float64) *ConsumedCapacity { - s.WriteCapacityUnits = &v - return s -} - -// Represents the continuous backups and point in time recovery settings on -// the table. -type ContinuousBackupsDescription struct { - _ struct{} `type:"structure"` - - // ContinuousBackupsStatus can be one of the following states: ENABLED, DISABLED - // - // ContinuousBackupsStatus is a required field - ContinuousBackupsStatus *string `type:"string" required:"true" enum:"ContinuousBackupsStatus"` - - // The description of the point in time recovery settings applied to the table. - PointInTimeRecoveryDescription *PointInTimeRecoveryDescription `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ContinuousBackupsDescription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ContinuousBackupsDescription) GoString() string { - return s.String() -} - -// SetContinuousBackupsStatus sets the ContinuousBackupsStatus field's value. -func (s *ContinuousBackupsDescription) SetContinuousBackupsStatus(v string) *ContinuousBackupsDescription { - s.ContinuousBackupsStatus = &v - return s -} - -// SetPointInTimeRecoveryDescription sets the PointInTimeRecoveryDescription field's value. -func (s *ContinuousBackupsDescription) SetPointInTimeRecoveryDescription(v *PointInTimeRecoveryDescription) *ContinuousBackupsDescription { - s.PointInTimeRecoveryDescription = v - return s -} - -// Backups have not yet been enabled for this table. -type ContinuousBackupsUnavailableException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
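ConsumedCapacity is only populated when the request asks for it; setting ReturnConsumedCapacity to INDEXES fills the per-index maps documented above. A sketch with an invented helper:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// scanWithCapacity runs a scan and prints where the read capacity went:
// the base table plus whatever indexes the response reports.
func scanWithCapacity(svc *dynamodb.DynamoDB, table string) error {
	out, err := svc.Scan(&dynamodb.ScanInput{
		TableName:              aws.String(table),
		ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityIndexes),
	})
	if err != nil {
		return err
	}
	cc := out.ConsumedCapacity
	if cc == nil {
		return nil
	}
	fmt.Printf("total: %.1f RCU\n", aws.Float64Value(cc.CapacityUnits))
	if cc.Table != nil {
		fmt.Printf("table: %.1f RCU\n", aws.Float64Value(cc.Table.CapacityUnits))
	}
	for name, c := range cc.GlobalSecondaryIndexes {
		fmt.Printf("gsi %s: %.1f RCU\n", name, aws.Float64Value(c.CapacityUnits))
	}
	return nil
}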
-func (s ContinuousBackupsUnavailableException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ContinuousBackupsUnavailableException) GoString() string { - return s.String() -} - -func newErrorContinuousBackupsUnavailableException(v protocol.ResponseMetadata) error { - return &ContinuousBackupsUnavailableException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *ContinuousBackupsUnavailableException) Code() string { - return "ContinuousBackupsUnavailableException" -} - -// Message returns the exception's message. -func (s *ContinuousBackupsUnavailableException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *ContinuousBackupsUnavailableException) OrigErr() error { - return nil -} - -func (s *ContinuousBackupsUnavailableException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *ContinuousBackupsUnavailableException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *ContinuousBackupsUnavailableException) RequestID() string { - return s.RespMetadata.RequestID -} - -// Represents a Contributor Insights summary entry. -type ContributorInsightsSummary struct { - _ struct{} `type:"structure"` - - // Describes the current status for contributor insights for the given table - // and index, if applicable. - ContributorInsightsStatus *string `type:"string" enum:"ContributorInsightsStatus"` - - // Name of the index associated with the summary, if any. - IndexName *string `min:"3" type:"string"` - - // Name of the table associated with the summary. - TableName *string `min:"3" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ContributorInsightsSummary) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ContributorInsightsSummary) GoString() string { - return s.String() -} - -// SetContributorInsightsStatus sets the ContributorInsightsStatus field's value. -func (s *ContributorInsightsSummary) SetContributorInsightsStatus(v string) *ContributorInsightsSummary { - s.ContributorInsightsStatus = &v - return s -} - -// SetIndexName sets the IndexName field's value. -func (s *ContributorInsightsSummary) SetIndexName(v string) *ContributorInsightsSummary { - s.IndexName = &v - return s -} - -// SetTableName sets the TableName field's value. -func (s *ContributorInsightsSummary) SetTableName(v string) *ContributorInsightsSummary { - s.TableName = &v - return s -} - -type CreateBackupInput struct { - _ struct{} `type:"structure"` - - // Specified name for the backup. 
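ContinuousBackupsDescription above is returned by UpdateContinuousBackups and DescribeContinuousBackups; enabling point-in-time recovery is the typical way to obtain one, and the ContinuousBackupsUnavailableException just documented is what the call returns while backups are not yet available. A sketch; the helper name is invented:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// enablePITR turns on point-in-time recovery and prints the resulting
// ContinuousBackupsDescription.
func enablePITR(svc *dynamodb.DynamoDB, table string) error {
	out, err := svc.UpdateContinuousBackups(&dynamodb.UpdateContinuousBackupsInput{
		TableName: aws.String(table),
		PointInTimeRecoverySpecification: &dynamodb.PointInTimeRecoverySpecification{
			PointInTimeRecoveryEnabled: aws.Bool(true),
		},
	})
	if err != nil {
		return err
	}
	d := out.ContinuousBackupsDescription
	fmt.Println("continuous backups:", aws.StringValue(d.ContinuousBackupsStatus))
	if p := d.PointInTimeRecoveryDescription; p != nil {
		fmt.Println("PITR:", aws.StringValue(p.PointInTimeRecoveryStatus))
	}
	return nil
}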
- // - // BackupName is a required field - BackupName *string `min:"3" type:"string" required:"true"` - - // The name of the table. You can also provide the Amazon Resource Name (ARN) - // of the table in this parameter. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateBackupInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateBackupInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateBackupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateBackupInput"} - if s.BackupName == nil { - invalidParams.Add(request.NewErrParamRequired("BackupName")) - } - if s.BackupName != nil && len(*s.BackupName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("BackupName", 3)) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBackupName sets the BackupName field's value. -func (s *CreateBackupInput) SetBackupName(v string) *CreateBackupInput { - s.BackupName = &v - return s -} - -// SetTableName sets the TableName field's value. -func (s *CreateBackupInput) SetTableName(v string) *CreateBackupInput { - s.TableName = &v - return s -} - -type CreateBackupOutput struct { - _ struct{} `type:"structure"` - - // Contains the details of the backup created for the table. - BackupDetails *BackupDetails `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateBackupOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateBackupOutput) GoString() string { - return s.String() -} - -// SetBackupDetails sets the BackupDetails field's value. -func (s *CreateBackupOutput) SetBackupDetails(v *BackupDetails) *CreateBackupOutput { - s.BackupDetails = v - return s -} - -// Represents a new global secondary index to be added to an existing table. -type CreateGlobalSecondaryIndexAction struct { - _ struct{} `type:"structure"` - - // The name of the global secondary index to be created. - // - // IndexName is a required field - IndexName *string `min:"3" type:"string" required:"true"` - - // The key schema for the global secondary index. 
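The CreateBackupInput/Output pair above reduces to a two-field call. A sketch that derives the backup name from the table name and date; both conventions are invented:

package example

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// backupTable requests an on-demand backup and returns its ARN from the
// BackupDetails in the response.
func backupTable(svc *dynamodb.DynamoDB, table string) (string, error) {
	out, err := svc.CreateBackup(&dynamodb.CreateBackupInput{
		TableName:  aws.String(table),
		BackupName: aws.String(fmt.Sprintf("%s-%s", table, time.Now().Format("2006-01-02"))),
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.BackupDetails.BackupArn), nil
}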
- // - // KeySchema is a required field - KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` - - // The maximum number of read and write units for the global secondary index - // being created. If you use this parameter, you must specify MaxReadRequestUnits, - // MaxWriteRequestUnits, or both. - OnDemandThroughput *OnDemandThroughput `type:"structure"` - - // Represents attributes that are copied (projected) from the table into an - // index. These are in addition to the primary key attributes and index key - // attributes, which are automatically projected. - // - // Projection is a required field - Projection *Projection `type:"structure" required:"true"` - - // Represents the provisioned throughput settings for the specified global secondary - // index. - // - // For current minimum and maximum provisioned throughput values, see Service, - // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) - // in the Amazon DynamoDB Developer Guide. - ProvisionedThroughput *ProvisionedThroughput `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateGlobalSecondaryIndexAction) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateGlobalSecondaryIndexAction) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateGlobalSecondaryIndexAction) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateGlobalSecondaryIndexAction"} - if s.IndexName == nil { - invalidParams.Add(request.NewErrParamRequired("IndexName")) - } - if s.IndexName != nil && len(*s.IndexName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) - } - if s.KeySchema == nil { - invalidParams.Add(request.NewErrParamRequired("KeySchema")) - } - if s.KeySchema != nil && len(s.KeySchema) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1)) - } - if s.Projection == nil { - invalidParams.Add(request.NewErrParamRequired("Projection")) - } - if s.KeySchema != nil { - for i, v := range s.KeySchema { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams)) - } - } - } - if s.Projection != nil { - if err := s.Projection.Validate(); err != nil { - invalidParams.AddNested("Projection", err.(request.ErrInvalidParams)) - } - } - if s.ProvisionedThroughput != nil { - if err := s.ProvisionedThroughput.Validate(); err != nil { - invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetIndexName sets the IndexName field's value. -func (s *CreateGlobalSecondaryIndexAction) SetIndexName(v string) *CreateGlobalSecondaryIndexAction { - s.IndexName = &v - return s -} - -// SetKeySchema sets the KeySchema field's value. 
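A CreateGlobalSecondaryIndexAction travels inside UpdateTable's GlobalSecondaryIndexUpdates, and any new key attribute has to be declared in AttributeDefinitions in the same request. A sketch; the email-index name, attribute, and capacities are invented:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// addEmailIndex backfills a new GSI keyed on Email. The attribute
// definition for the new key ships in the same UpdateTable request.
func addEmailIndex(svc *dynamodb.DynamoDB, table string) error {
	_, err := svc.UpdateTable(&dynamodb.UpdateTableInput{
		TableName: aws.String(table),
		AttributeDefinitions: []*dynamodb.AttributeDefinition{
			{AttributeName: aws.String("Email"), AttributeType: aws.String(dynamodb.ScalarAttributeTypeS)},
		},
		GlobalSecondaryIndexUpdates: []*dynamodb.GlobalSecondaryIndexUpdate{
			{
				Create: &dynamodb.CreateGlobalSecondaryIndexAction{
					IndexName: aws.String("email-index"),
					KeySchema: []*dynamodb.KeySchemaElement{
						{AttributeName: aws.String("Email"), KeyType: aws.String(dynamodb.KeyTypeHash)},
					},
					Projection: &dynamodb.Projection{ProjectionType: aws.String(dynamodb.ProjectionTypeKeysOnly)},
					ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
						ReadCapacityUnits:  aws.Int64(5),
						WriteCapacityUnits: aws.Int64(5),
					},
				},
			},
		},
	})
	return err
}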
-func (s *CreateGlobalSecondaryIndexAction) SetKeySchema(v []*KeySchemaElement) *CreateGlobalSecondaryIndexAction { - s.KeySchema = v - return s -} - -// SetOnDemandThroughput sets the OnDemandThroughput field's value. -func (s *CreateGlobalSecondaryIndexAction) SetOnDemandThroughput(v *OnDemandThroughput) *CreateGlobalSecondaryIndexAction { - s.OnDemandThroughput = v - return s -} - -// SetProjection sets the Projection field's value. -func (s *CreateGlobalSecondaryIndexAction) SetProjection(v *Projection) *CreateGlobalSecondaryIndexAction { - s.Projection = v - return s -} - -// SetProvisionedThroughput sets the ProvisionedThroughput field's value. -func (s *CreateGlobalSecondaryIndexAction) SetProvisionedThroughput(v *ProvisionedThroughput) *CreateGlobalSecondaryIndexAction { - s.ProvisionedThroughput = v - return s -} - -type CreateGlobalTableInput struct { - _ struct{} `type:"structure"` - - // The global table name. - // - // GlobalTableName is a required field - GlobalTableName *string `min:"3" type:"string" required:"true"` - - // The Regions where the global table needs to be created. - // - // ReplicationGroup is a required field - ReplicationGroup []*Replica `type:"list" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateGlobalTableInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateGlobalTableInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateGlobalTableInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateGlobalTableInput"} - if s.GlobalTableName == nil { - invalidParams.Add(request.NewErrParamRequired("GlobalTableName")) - } - if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3)) - } - if s.ReplicationGroup == nil { - invalidParams.Add(request.NewErrParamRequired("ReplicationGroup")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGlobalTableName sets the GlobalTableName field's value. -func (s *CreateGlobalTableInput) SetGlobalTableName(v string) *CreateGlobalTableInput { - s.GlobalTableName = &v - return s -} - -// SetReplicationGroup sets the ReplicationGroup field's value. -func (s *CreateGlobalTableInput) SetReplicationGroup(v []*Replica) *CreateGlobalTableInput { - s.ReplicationGroup = v - return s -} - -type CreateGlobalTableOutput struct { - _ struct{} `type:"structure"` - - // Contains the details of the global table. - GlobalTableDescription *GlobalTableDescription `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateGlobalTableOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateGlobalTableOutput) GoString() string { - return s.String() -} - -// SetGlobalTableDescription sets the GlobalTableDescription field's value. -func (s *CreateGlobalTableOutput) SetGlobalTableDescription(v *GlobalTableDescription) *CreateGlobalTableOutput { - s.GlobalTableDescription = v - return s -} - -// Represents a replica to be added. -type CreateReplicaAction struct { - _ struct{} `type:"structure"` - - // The Region of the replica to be added. - // - // RegionName is a required field - RegionName *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateReplicaAction) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateReplicaAction) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateReplicaAction) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateReplicaAction"} - if s.RegionName == nil { - invalidParams.Add(request.NewErrParamRequired("RegionName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetRegionName sets the RegionName field's value. -func (s *CreateReplicaAction) SetRegionName(v string) *CreateReplicaAction { - s.RegionName = &v - return s -} - -// Represents a replica to be created. -type CreateReplicationGroupMemberAction struct { - _ struct{} `type:"structure"` - - // Replica-specific global secondary index settings. - GlobalSecondaryIndexes []*ReplicaGlobalSecondaryIndex `min:"1" type:"list"` - - // The KMS key that should be used for KMS encryption in the new replica. To - // specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or - // alias ARN. Note that you should only provide this parameter if the key is - // different from the default DynamoDB KMS key alias/aws/dynamodb. - KMSMasterKeyId *string `type:"string"` - - // The maximum on-demand throughput settings for the specified replica table - // being created. You can only modify MaxReadRequestUnits, because you can't - // modify MaxWriteRequestUnits for individual replica tables. - OnDemandThroughputOverride *OnDemandThroughputOverride `type:"structure"` - - // Replica-specific provisioned throughput. If not specified, uses the source - // table's provisioned throughput settings. - ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"` - - // The Region where the new replica will be created. - // - // RegionName is a required field - RegionName *string `type:"string" required:"true"` - - // Replica-specific table class. If not specified, uses the source table's table - // class. - TableClassOverride *string `type:"string" enum:"TableClass"` -} - -// String returns the string representation. 
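CreateReplicationGroupMemberAction is likewise applied through UpdateTable, via its ReplicaUpdates list. A sketch of adding one Region and leaving the optional KMS, throughput, and table-class overrides documented above at their defaults; the helper and parameters are invented:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// addReplica adds a Region to a version 2019.11.21 (version 2) global
// table. Only RegionName is required.
func addReplica(svc *dynamodb.DynamoDB, table, region string) error {
	_, err := svc.UpdateTable(&dynamodb.UpdateTableInput{
		TableName: aws.String(table),
		ReplicaUpdates: []*dynamodb.ReplicationGroupUpdate{
			{Create: &dynamodb.CreateReplicationGroupMemberAction{
				RegionName: aws.String(region),
			}},
		},
	})
	return err
}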
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateReplicationGroupMemberAction) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateReplicationGroupMemberAction) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateReplicationGroupMemberAction) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateReplicationGroupMemberAction"} - if s.GlobalSecondaryIndexes != nil && len(s.GlobalSecondaryIndexes) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GlobalSecondaryIndexes", 1)) - } - if s.RegionName == nil { - invalidParams.Add(request.NewErrParamRequired("RegionName")) - } - if s.GlobalSecondaryIndexes != nil { - for i, v := range s.GlobalSecondaryIndexes { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexes", i), err.(request.ErrInvalidParams)) - } - } - } - if s.ProvisionedThroughputOverride != nil { - if err := s.ProvisionedThroughputOverride.Validate(); err != nil { - invalidParams.AddNested("ProvisionedThroughputOverride", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. -func (s *CreateReplicationGroupMemberAction) SetGlobalSecondaryIndexes(v []*ReplicaGlobalSecondaryIndex) *CreateReplicationGroupMemberAction { - s.GlobalSecondaryIndexes = v - return s -} - -// SetKMSMasterKeyId sets the KMSMasterKeyId field's value. -func (s *CreateReplicationGroupMemberAction) SetKMSMasterKeyId(v string) *CreateReplicationGroupMemberAction { - s.KMSMasterKeyId = &v - return s -} - -// SetOnDemandThroughputOverride sets the OnDemandThroughputOverride field's value. -func (s *CreateReplicationGroupMemberAction) SetOnDemandThroughputOverride(v *OnDemandThroughputOverride) *CreateReplicationGroupMemberAction { - s.OnDemandThroughputOverride = v - return s -} - -// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. -func (s *CreateReplicationGroupMemberAction) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *CreateReplicationGroupMemberAction { - s.ProvisionedThroughputOverride = v - return s -} - -// SetRegionName sets the RegionName field's value. -func (s *CreateReplicationGroupMemberAction) SetRegionName(v string) *CreateReplicationGroupMemberAction { - s.RegionName = &v - return s -} - -// SetTableClassOverride sets the TableClassOverride field's value. -func (s *CreateReplicationGroupMemberAction) SetTableClassOverride(v string) *CreateReplicationGroupMemberAction { - s.TableClassOverride = &v - return s -} - -// Represents the input of a CreateTable operation. -type CreateTableInput struct { - _ struct{} `type:"structure"` - - // An array of attributes that describe the key schema for the table and indexes. 
- // - // AttributeDefinitions is a required field - AttributeDefinitions []*AttributeDefinition `type:"list" required:"true"` - - // Controls how you are charged for read and write throughput and how you manage - // capacity. This setting can be changed later. - // - // * PROVISIONED - We recommend using PROVISIONED for predictable workloads. - // PROVISIONED sets the billing mode to Provisioned capacity mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html). - // - // * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable - // workloads. PAY_PER_REQUEST sets the billing mode to On-demand capacity - // mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/on-demand-capacity-mode.html). - BillingMode *string `type:"string" enum:"BillingMode"` - - // Indicates whether deletion protection is to be enabled (true) or disabled - // (false) on the table. - DeletionProtectionEnabled *bool `type:"boolean"` - - // One or more global secondary indexes (the maximum is 20) to be created on - // the table. Each global secondary index in the array includes the following: - // - // * IndexName - The name of the global secondary index. Must be unique only - // for this table. - // - // * KeySchema - Specifies the key schema for the global secondary index. - // - // * Projection - Specifies attributes that are copied (projected) from the - // table into the index. These are in addition to the primary key attributes - // and index key attributes, which are automatically projected. Each attribute - // specification is composed of: ProjectionType - One of the following: KEYS_ONLY - // - Only the index and primary keys are projected into the index. INCLUDE - // - Only the specified table attributes are projected into the index. The - // list of projected attributes is in NonKeyAttributes. ALL - All of the - // table attributes are projected into the index. NonKeyAttributes - A list - // of one or more non-key attribute names that are projected into the secondary - // index. The total count of attributes provided in NonKeyAttributes, summed - // across all of the secondary indexes, must not exceed 100. If you project - // the same attribute into two different indexes, this counts as two distinct - // attributes when determining the total. - // - // * ProvisionedThroughput - The provisioned throughput settings for the - // global secondary index, consisting of read and write capacity units. - GlobalSecondaryIndexes []*GlobalSecondaryIndex `type:"list"` - - // Specifies the attributes that make up the primary key for a table or an index. - // The attributes in KeySchema must also be defined in the AttributeDefinitions - // array. For more information, see Data Model (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html) - // in the Amazon DynamoDB Developer Guide. - // - // Each KeySchemaElement in the array is composed of: - // - // * AttributeName - The name of this key attribute. - // - // * KeyType - The role that the key attribute will assume: HASH - partition - // key RANGE - sort key - // - // The partition key of an item is also known as its hash attribute. The term - // "hash attribute" derives from the DynamoDB usage of an internal hash function - // to evenly distribute data items across partitions, based on their partition - // key values. - // - // The sort key of an item is also known as its range attribute. 
The term "range - // attribute" derives from the way DynamoDB stores items with the same partition - // key physically close together, in sorted order by the sort key value. - // - // For a simple primary key (partition key), you must provide exactly one element - // with a KeyType of HASH. - // - // For a composite primary key (partition key and sort key), you must provide - // exactly two elements, in this order: The first element must have a KeyType - // of HASH, and the second element must have a KeyType of RANGE. - // - // For more information, see Working with Tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#WorkingWithTables.primary.key) - // in the Amazon DynamoDB Developer Guide. - // - // KeySchema is a required field - KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` - - // One or more local secondary indexes (the maximum is 5) to be created on the - // table. Each index is scoped to a given partition key value. There is a 10 - // GB size limit per partition key value; otherwise, the size of a local secondary - // index is unconstrained. - // - // Each local secondary index in the array includes the following: - // - // * IndexName - The name of the local secondary index. Must be unique only - // for this table. - // - // * KeySchema - Specifies the key schema for the local secondary index. - // The key schema must begin with the same partition key as the table. - // - // * Projection - Specifies attributes that are copied (projected) from the - // table into the index. These are in addition to the primary key attributes - // and index key attributes, which are automatically projected. Each attribute - // specification is composed of: ProjectionType - One of the following: KEYS_ONLY - // - Only the index and primary keys are projected into the index. INCLUDE - // - Only the specified table attributes are projected into the index. The - // list of projected attributes is in NonKeyAttributes. ALL - All of the - // table attributes are projected into the index. NonKeyAttributes - A list - // of one or more non-key attribute names that are projected into the secondary - // index. The total count of attributes provided in NonKeyAttributes, summed - // across all of the secondary indexes, must not exceed 100. If you project - // the same attribute into two different indexes, this counts as two distinct - // attributes when determining the total. - LocalSecondaryIndexes []*LocalSecondaryIndex `type:"list"` - - // Sets the maximum number of read and write units for the specified table in - // on-demand capacity mode. If you use this parameter, you must specify MaxReadRequestUnits, - // MaxWriteRequestUnits, or both. - OnDemandThroughput *OnDemandThroughput `type:"structure"` - - // Represents the provisioned throughput settings for a specified table or index. - // The settings can be modified using the UpdateTable operation. - // - // If you set BillingMode as PROVISIONED, you must specify this property. If - // you set BillingMode as PAY_PER_REQUEST, you cannot specify this property. - // - // For current minimum and maximum provisioned throughput values, see Service, - // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) - // in the Amazon DynamoDB Developer Guide. - ProvisionedThroughput *ProvisionedThroughput `type:"structure"` - - // An Amazon Web Services resource-based policy document in JSON format that - // will be attached to the table. 
- // - // When you attach a resource-based policy while creating a table, the policy - // application is strongly consistent. - // - // The maximum size supported for a resource-based policy document is 20 KB. - // DynamoDB counts whitespaces when calculating the size of a policy against - // this limit. For a full list of all considerations that apply for resource-based - // policies, see Resource-based policy considerations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-considerations.html). - // - // You need to specify the CreateTable and PutResourcePolicy IAM actions for - // authorizing a user to create a table with a resource-based policy. - ResourcePolicy *string `type:"string"` - - // Represents the settings used to enable server-side encryption. - SSESpecification *SSESpecification `type:"structure"` - - // The settings for DynamoDB Streams on the table. These settings consist of: - // - // * StreamEnabled - Indicates whether DynamoDB Streams is to be enabled - // (true) or disabled (false). - // - // * StreamViewType - When an item in the table is modified, StreamViewType - // determines what information is written to the table's stream. Valid values - // for StreamViewType are: KEYS_ONLY - Only the key attributes of the modified - // item are written to the stream. NEW_IMAGE - The entire item, as it appears - // after it was modified, is written to the stream. OLD_IMAGE - The entire - // item, as it appeared before it was modified, is written to the stream. - // NEW_AND_OLD_IMAGES - Both the new and the old item images of the item - // are written to the stream. - StreamSpecification *StreamSpecification `type:"structure"` - - // The table class of the new table. Valid values are STANDARD and STANDARD_INFREQUENT_ACCESS. - TableClass *string `type:"string" enum:"TableClass"` - - // The name of the table to create. You can also provide the Amazon Resource - // Name (ARN) of the table in this parameter. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` - - // A list of key-value pairs to label the table. For more information, see Tagging - // for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html). - Tags []*Tag `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateTableInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateTableInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateTableInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateTableInput"} - if s.AttributeDefinitions == nil { - invalidParams.Add(request.NewErrParamRequired("AttributeDefinitions")) - } - if s.KeySchema == nil { - invalidParams.Add(request.NewErrParamRequired("KeySchema")) - } - if s.KeySchema != nil && len(s.KeySchema) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1)) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - if s.AttributeDefinitions != nil { - for i, v := range s.AttributeDefinitions { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AttributeDefinitions", i), err.(request.ErrInvalidParams)) - } - } - } - if s.GlobalSecondaryIndexes != nil { - for i, v := range s.GlobalSecondaryIndexes { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexes", i), err.(request.ErrInvalidParams)) - } - } - } - if s.KeySchema != nil { - for i, v := range s.KeySchema { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams)) - } - } - } - if s.LocalSecondaryIndexes != nil { - for i, v := range s.LocalSecondaryIndexes { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LocalSecondaryIndexes", i), err.(request.ErrInvalidParams)) - } - } - } - if s.ProvisionedThroughput != nil { - if err := s.ProvisionedThroughput.Validate(); err != nil { - invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) - } - } - if s.StreamSpecification != nil { - if err := s.StreamSpecification.Validate(); err != nil { - invalidParams.AddNested("StreamSpecification", err.(request.ErrInvalidParams)) - } - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAttributeDefinitions sets the AttributeDefinitions field's value. -func (s *CreateTableInput) SetAttributeDefinitions(v []*AttributeDefinition) *CreateTableInput { - s.AttributeDefinitions = v - return s -} - -// SetBillingMode sets the BillingMode field's value. -func (s *CreateTableInput) SetBillingMode(v string) *CreateTableInput { - s.BillingMode = &v - return s -} - -// SetDeletionProtectionEnabled sets the DeletionProtectionEnabled field's value. -func (s *CreateTableInput) SetDeletionProtectionEnabled(v bool) *CreateTableInput { - s.DeletionProtectionEnabled = &v - return s -} - -// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. -func (s *CreateTableInput) SetGlobalSecondaryIndexes(v []*GlobalSecondaryIndex) *CreateTableInput { - s.GlobalSecondaryIndexes = v - return s -} - -// SetKeySchema sets the KeySchema field's value. -func (s *CreateTableInput) SetKeySchema(v []*KeySchemaElement) *CreateTableInput { - s.KeySchema = v - return s -} - -// SetLocalSecondaryIndexes sets the LocalSecondaryIndexes field's value. 
-func (s *CreateTableInput) SetLocalSecondaryIndexes(v []*LocalSecondaryIndex) *CreateTableInput {
- s.LocalSecondaryIndexes = v
- return s
-}
-
-// SetOnDemandThroughput sets the OnDemandThroughput field's value.
-func (s *CreateTableInput) SetOnDemandThroughput(v *OnDemandThroughput) *CreateTableInput {
- s.OnDemandThroughput = v
- return s
-}
-
-// SetProvisionedThroughput sets the ProvisionedThroughput field's value.
-func (s *CreateTableInput) SetProvisionedThroughput(v *ProvisionedThroughput) *CreateTableInput {
- s.ProvisionedThroughput = v
- return s
-}
-
-// SetResourcePolicy sets the ResourcePolicy field's value.
-func (s *CreateTableInput) SetResourcePolicy(v string) *CreateTableInput {
- s.ResourcePolicy = &v
- return s
-}
-
-// SetSSESpecification sets the SSESpecification field's value.
-func (s *CreateTableInput) SetSSESpecification(v *SSESpecification) *CreateTableInput {
- s.SSESpecification = v
- return s
-}
-
-// SetStreamSpecification sets the StreamSpecification field's value.
-func (s *CreateTableInput) SetStreamSpecification(v *StreamSpecification) *CreateTableInput {
- s.StreamSpecification = v
- return s
-}
-
-// SetTableClass sets the TableClass field's value.
-func (s *CreateTableInput) SetTableClass(v string) *CreateTableInput {
- s.TableClass = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *CreateTableInput) SetTableName(v string) *CreateTableInput {
- s.TableName = &v
- return s
-}
-
-// SetTags sets the Tags field's value.
-func (s *CreateTableInput) SetTags(v []*Tag) *CreateTableInput {
- s.Tags = v
- return s
-}
-
-// Represents the output of a CreateTable operation.
-type CreateTableOutput struct {
- _ struct{} `type:"structure"`
-
- // Represents the properties of the table.
- TableDescription *TableDescription `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateTableOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateTableOutput) GoString() string {
- return s.String()
-}
-
-// SetTableDescription sets the TableDescription field's value.
-func (s *CreateTableOutput) SetTableDescription(v *TableDescription) *CreateTableOutput {
- s.TableDescription = v
- return s
-}
-
-// Processing options for the CSV file being imported.
-type CsvOptions struct {
- _ struct{} `type:"structure"`
-
- // The delimiter used for separating items in the CSV file being imported.
- Delimiter *string `min:"1" type:"string"`
-
- // List of the headers used to specify a common header for all source CSV files
- // being imported. If this field is specified then the first line of each CSV
- // file is treated as data instead of the header. If this field is not specified
- // then the first line of each CSV file is treated as the header.
- HeaderList []*string `min:"1" type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CsvOptions) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CsvOptions) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CsvOptions) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CsvOptions"} - if s.Delimiter != nil && len(*s.Delimiter) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Delimiter", 1)) - } - if s.HeaderList != nil && len(s.HeaderList) < 1 { - invalidParams.Add(request.NewErrParamMinLen("HeaderList", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDelimiter sets the Delimiter field's value. -func (s *CsvOptions) SetDelimiter(v string) *CsvOptions { - s.Delimiter = &v - return s -} - -// SetHeaderList sets the HeaderList field's value. -func (s *CsvOptions) SetHeaderList(v []*string) *CsvOptions { - s.HeaderList = v - return s -} - -// Represents a request to perform a DeleteItem operation. -type Delete struct { - _ struct{} `type:"structure"` - - // A condition that must be satisfied in order for a conditional delete to succeed. - ConditionExpression *string `type:"string"` - - // One or more substitution tokens for attribute names in an expression. - ExpressionAttributeNames map[string]*string `type:"map"` - - // One or more values that can be substituted in an expression. - ExpressionAttributeValues map[string]*AttributeValue `type:"map"` - - // The primary key of the item to be deleted. Each element consists of an attribute - // name and a value for that attribute. - // - // Key is a required field - Key map[string]*AttributeValue `type:"map" required:"true"` - - // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the - // Delete condition fails. For ReturnValuesOnConditionCheckFailure, the valid - // values are: NONE and ALL_OLD. - ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"` - - // Name of the table in which the item to be deleted resides. You can also provide - // the Amazon Resource Name (ARN) of the table in this parameter. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Delete) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Delete) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
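Note that the Delete type above is the transactional variant used inside TransactWriteItems, not the standalone DeleteItem input that follows later in this file. A sketch of that usage, assuming the TransactWriteItems entry point from this package; the table, key, and function names are illustrative:

func deleteInTransaction(svc *dynamodb.DynamoDB) error {
	// One transactional delete, guarded by a condition on the stored item.
	_, err := svc.TransactWriteItems(&dynamodb.TransactWriteItemsInput{
		TransactItems: []*dynamodb.TransactWriteItem{{
			Delete: &dynamodb.Delete{
				TableName:           aws.String("example-table"), // illustrative
				Key:                 map[string]*dynamodb.AttributeValue{"pk": {S: aws.String("user#123")}},
				ConditionExpression: aws.String("attribute_exists(pk)"),
			},
		}},
	})
	return err
}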
-func (s *Delete) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Delete"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetConditionExpression sets the ConditionExpression field's value. -func (s *Delete) SetConditionExpression(v string) *Delete { - s.ConditionExpression = &v - return s -} - -// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. -func (s *Delete) SetExpressionAttributeNames(v map[string]*string) *Delete { - s.ExpressionAttributeNames = v - return s -} - -// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. -func (s *Delete) SetExpressionAttributeValues(v map[string]*AttributeValue) *Delete { - s.ExpressionAttributeValues = v - return s -} - -// SetKey sets the Key field's value. -func (s *Delete) SetKey(v map[string]*AttributeValue) *Delete { - s.Key = v - return s -} - -// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. -func (s *Delete) SetReturnValuesOnConditionCheckFailure(v string) *Delete { - s.ReturnValuesOnConditionCheckFailure = &v - return s -} - -// SetTableName sets the TableName field's value. -func (s *Delete) SetTableName(v string) *Delete { - s.TableName = &v - return s -} - -type DeleteBackupInput struct { - _ struct{} `type:"structure"` - - // The ARN associated with the backup. - // - // BackupArn is a required field - BackupArn *string `min:"37" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBackupInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteBackupInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBackupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBackupInput"} - if s.BackupArn == nil { - invalidParams.Add(request.NewErrParamRequired("BackupArn")) - } - if s.BackupArn != nil && len(*s.BackupArn) < 37 { - invalidParams.Add(request.NewErrParamMinLen("BackupArn", 37)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBackupArn sets the BackupArn field's value. -func (s *DeleteBackupInput) SetBackupArn(v string) *DeleteBackupInput { - s.BackupArn = &v - return s -} - -type DeleteBackupOutput struct { - _ struct{} `type:"structure"` - - // Contains the description of the backup created for the table. - BackupDescription *BackupDescription `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
-// The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteBackupOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteBackupOutput) GoString() string {
- return s.String()
-}
-
-// SetBackupDescription sets the BackupDescription field's value.
-func (s *DeleteBackupOutput) SetBackupDescription(v *BackupDescription) *DeleteBackupOutput {
- s.BackupDescription = v
- return s
-}
-
-// Represents a global secondary index to be deleted from an existing table.
-type DeleteGlobalSecondaryIndexAction struct {
- _ struct{} `type:"structure"`
-
- // The name of the global secondary index to be deleted.
- //
- // IndexName is a required field
- IndexName *string `min:"3" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteGlobalSecondaryIndexAction) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteGlobalSecondaryIndexAction) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteGlobalSecondaryIndexAction) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DeleteGlobalSecondaryIndexAction"}
- if s.IndexName == nil {
- invalidParams.Add(request.NewErrParamRequired("IndexName"))
- }
- if s.IndexName != nil && len(*s.IndexName) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetIndexName sets the IndexName field's value.
-func (s *DeleteGlobalSecondaryIndexAction) SetIndexName(v string) *DeleteGlobalSecondaryIndexAction {
- s.IndexName = &v
- return s
-}
-
-// Represents the input of a DeleteItem operation.
-type DeleteItemInput struct {
- _ struct{} `type:"structure"`
-
- // A condition that must be satisfied in order for a conditional DeleteItem
- // to succeed.
- //
- // An expression can contain any of the following:
- //
- // * Functions: attribute_exists | attribute_not_exists | attribute_type
- // | contains | begins_with | size These function names are case-sensitive.
- //
- // * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
- //
- // * Logical operators: AND | OR | NOT
- //
- // For more information about condition expressions, see Condition Expressions
- // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
- // in the Amazon DynamoDB Developer Guide.
- ConditionExpression *string `type:"string"`
-
- // This is a legacy parameter. Use ConditionExpression instead. For more information,
- // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html)
- // in the Amazon DynamoDB Developer Guide.
- ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` - - // This is a legacy parameter. Use ConditionExpression instead. For more information, - // see Expected (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html) - // in the Amazon DynamoDB Developer Guide. - Expected map[string]*ExpectedAttributeValue `type:"map"` - - // One or more substitution tokens for attribute names in an expression. The - // following are some use cases for using ExpressionAttributeNames: - // - // * To access an attribute whose name conflicts with a DynamoDB reserved - // word. - // - // * To create a placeholder for repeating occurrences of an attribute name - // in an expression. - // - // * To prevent special characters in an attribute name from being misinterpreted - // in an expression. - // - // Use the # character in an expression to dereference an attribute name. For - // example, consider the following attribute name: - // - // * Percentile - // - // The name of this attribute conflicts with a reserved word, so it cannot be - // used directly in an expression. (For the complete list of reserved words, - // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) - // in the Amazon DynamoDB Developer Guide). To work around this, you could specify - // the following for ExpressionAttributeNames: - // - // * {"#P":"Percentile"} - // - // You could then use this substitution in an expression, as in this example: - // - // * #P = :val - // - // Tokens that begin with the : character are expression attribute values, which - // are placeholders for the actual value at runtime. - // - // For more information on expression attribute names, see Specifying Item Attributes - // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. - ExpressionAttributeNames map[string]*string `type:"map"` - - // One or more values that can be substituted in an expression. - // - // Use the : (colon) character in an expression to dereference an attribute - // value. For example, suppose that you wanted to check whether the value of - // the ProductStatus attribute was one of the following: - // - // Available | Backordered | Discontinued - // - // You would first need to specify ExpressionAttributeValues as follows: - // - // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} - // } - // - // You could then use these values in an expression, such as this: - // - // ProductStatus IN (:avail, :back, :disc) - // - // For more information on expression attribute values, see Condition Expressions - // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) - // in the Amazon DynamoDB Developer Guide. - ExpressionAttributeValues map[string]*AttributeValue `type:"map"` - - // A map of attribute names to AttributeValue objects, representing the primary - // key of the item to delete. - // - // For the primary key, you must provide all of the key attributes. For example, - // with a simple primary key, you only need to provide a value for the partition - // key. For a composite primary key, you must provide values for both the partition - // key and the sort key. 
- //
- // Key is a required field
- Key map[string]*AttributeValue `type:"map" required:"true"`
-
- // Determines the level of detail about either provisioned or on-demand throughput
- // consumption that is returned in the response:
- //
- // * INDEXES - The response includes the aggregate ConsumedCapacity for the
- // operation, together with ConsumedCapacity for each table and secondary
- // index that was accessed. Note that some operations, such as GetItem and
- // BatchGetItem, do not access any indexes at all. In these cases, specifying
- // INDEXES will only return ConsumedCapacity information for table(s).
- //
- // * TOTAL - The response includes only the aggregate ConsumedCapacity for
- // the operation.
- //
- // * NONE - No ConsumedCapacity details are included in the response.
- ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
-
- // Determines whether item collection metrics are returned. If set to SIZE,
- // statistics about item collections, if any, that were modified during the
- // operation are returned in the response. If set to NONE (the default), no
- // statistics are returned.
- ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"`
-
- // Use ReturnValues if you want to get the item attributes as they appeared
- // before they were deleted. For DeleteItem, the valid values are:
- //
- // * NONE - If ReturnValues is not specified, or if its value is NONE, then
- // nothing is returned. (This setting is the default for ReturnValues.)
- //
- // * ALL_OLD - The content of the old item is returned.
- //
- // There is no additional cost associated with requesting a return value aside
- // from the small network and processing overhead of receiving a larger response.
- // No read capacity units are consumed.
- //
- // The ReturnValues parameter is used by several DynamoDB operations; however,
- // DeleteItem does not recognize any values other than NONE or ALL_OLD.
- ReturnValues *string `type:"string" enum:"ReturnValue"`
-
- // An optional parameter that returns the item attributes for a DeleteItem operation
- // that failed a condition check.
- //
- // There is no additional cost associated with requesting a return value aside
- // from the small network and processing overhead of receiving a larger response.
- // No read capacity units are consumed.
- ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"`
-
- // The name of the table from which to delete the item. You can also provide
- // the Amazon Resource Name (ARN) of the table in this parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteItemInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteItemInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
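Tying the DeleteItemInput fields documented above together: a conditional delete that dereferences a reserved attribute name through ExpressionAttributeNames and substitutes the comparison value through ExpressionAttributeValues. A sketch; the table, key, and attribute names are illustrative:

func conditionalDelete(svc *dynamodb.DynamoDB) (*dynamodb.DeleteItemOutput, error) {
	// Delete only while the (reserved-word) attribute Status is "Discontinued".
	return svc.DeleteItem(&dynamodb.DeleteItemInput{
		TableName:           aws.String("example-table"), // illustrative
		Key:                 map[string]*dynamodb.AttributeValue{"pk": {S: aws.String("product#42")}},
		ConditionExpression: aws.String("#S = :disc"),
		ExpressionAttributeNames: map[string]*string{
			"#S": aws.String("Status"), // Status is a DynamoDB reserved word
		},
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
			":disc": {S: aws.String("Discontinued")},
		},
		ReturnValues:           aws.String(dynamodb.ReturnValueAllOld),
		ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal),
	})
}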
-func (s *DeleteItemInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteItemInput"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetConditionExpression sets the ConditionExpression field's value. -func (s *DeleteItemInput) SetConditionExpression(v string) *DeleteItemInput { - s.ConditionExpression = &v - return s -} - -// SetConditionalOperator sets the ConditionalOperator field's value. -func (s *DeleteItemInput) SetConditionalOperator(v string) *DeleteItemInput { - s.ConditionalOperator = &v - return s -} - -// SetExpected sets the Expected field's value. -func (s *DeleteItemInput) SetExpected(v map[string]*ExpectedAttributeValue) *DeleteItemInput { - s.Expected = v - return s -} - -// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. -func (s *DeleteItemInput) SetExpressionAttributeNames(v map[string]*string) *DeleteItemInput { - s.ExpressionAttributeNames = v - return s -} - -// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. -func (s *DeleteItemInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *DeleteItemInput { - s.ExpressionAttributeValues = v - return s -} - -// SetKey sets the Key field's value. -func (s *DeleteItemInput) SetKey(v map[string]*AttributeValue) *DeleteItemInput { - s.Key = v - return s -} - -// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. -func (s *DeleteItemInput) SetReturnConsumedCapacity(v string) *DeleteItemInput { - s.ReturnConsumedCapacity = &v - return s -} - -// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value. -func (s *DeleteItemInput) SetReturnItemCollectionMetrics(v string) *DeleteItemInput { - s.ReturnItemCollectionMetrics = &v - return s -} - -// SetReturnValues sets the ReturnValues field's value. -func (s *DeleteItemInput) SetReturnValues(v string) *DeleteItemInput { - s.ReturnValues = &v - return s -} - -// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. -func (s *DeleteItemInput) SetReturnValuesOnConditionCheckFailure(v string) *DeleteItemInput { - s.ReturnValuesOnConditionCheckFailure = &v - return s -} - -// SetTableName sets the TableName field's value. -func (s *DeleteItemInput) SetTableName(v string) *DeleteItemInput { - s.TableName = &v - return s -} - -// Represents the output of a DeleteItem operation. -type DeleteItemOutput struct { - _ struct{} `type:"structure"` - - // A map of attribute names to AttributeValue objects, representing the item - // as it appeared before the DeleteItem operation. This map appears in the response - // only if ReturnValues was specified as ALL_OLD in the request. - Attributes map[string]*AttributeValue `type:"map"` - - // The capacity units consumed by the DeleteItem operation. The data returned - // includes the total provisioned throughput consumed, along with statistics - // for the table and any indexes involved in the operation. ConsumedCapacity - // is only returned if the ReturnConsumedCapacity parameter was specified. 
- // For more information, see Provisioned capacity mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html)
- // in the Amazon DynamoDB Developer Guide.
- ConsumedCapacity *ConsumedCapacity `type:"structure"`
-
- // Information about item collections, if any, that were affected by the DeleteItem
- // operation. ItemCollectionMetrics is only returned if the ReturnItemCollectionMetrics
- // parameter was specified. If the table does not have any local secondary indexes,
- // this information is not returned in the response.
- //
- // Each ItemCollectionMetrics element consists of:
- //
- // * ItemCollectionKey - The partition key value of the item collection.
- // This is the same as the partition key value of the item itself.
- //
- // * SizeEstimateRangeGB - An estimate of item collection size, in gigabytes.
- // This value is a two-element array containing a lower bound and an upper
- // bound for the estimate. The estimate includes the size of all the items
- // in the table, plus the size of all attributes projected into all of the
- // local secondary indexes on that table. Use this estimate to measure whether
- // a local secondary index is approaching its size limit. The estimate is
- // subject to change over time; therefore, do not rely on the precision or
- // accuracy of the estimate.
- ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteItemOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteItemOutput) GoString() string {
- return s.String()
-}
-
-// SetAttributes sets the Attributes field's value.
-func (s *DeleteItemOutput) SetAttributes(v map[string]*AttributeValue) *DeleteItemOutput {
- s.Attributes = v
- return s
-}
-
-// SetConsumedCapacity sets the ConsumedCapacity field's value.
-func (s *DeleteItemOutput) SetConsumedCapacity(v *ConsumedCapacity) *DeleteItemOutput {
- s.ConsumedCapacity = v
- return s
-}
-
-// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value.
-func (s *DeleteItemOutput) SetItemCollectionMetrics(v *ItemCollectionMetrics) *DeleteItemOutput {
- s.ItemCollectionMetrics = v
- return s
-}
-
-// Represents a replica to be removed.
-type DeleteReplicaAction struct {
- _ struct{} `type:"structure"`
-
- // The Region of the replica to be removed.
- //
- // RegionName is a required field
- RegionName *string `type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteReplicaAction) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
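On the response side, the DeleteItemOutput fields described above are populated only when the matching request parameters were set. Building on the conditionalDelete sketch from earlier (fmt and log assumed imported):

out, err := conditionalDelete(svc)
if err != nil {
	log.Fatal(err) // includes ConditionalCheckFailedException when the guard fails
}
if out.ConsumedCapacity != nil {
	// Present because the request set ReturnConsumedCapacity to TOTAL.
	fmt.Printf("consumed %.1f capacity units\n", aws.Float64Value(out.ConsumedCapacity.CapacityUnits))
}
// Old item image, present because the request set ReturnValues to ALL_OLD.
for name, av := range out.Attributes {
	fmt.Println(name, av.String())
}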
-func (s DeleteReplicaAction) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteReplicaAction) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteReplicaAction"} - if s.RegionName == nil { - invalidParams.Add(request.NewErrParamRequired("RegionName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetRegionName sets the RegionName field's value. -func (s *DeleteReplicaAction) SetRegionName(v string) *DeleteReplicaAction { - s.RegionName = &v - return s -} - -// Represents a replica to be deleted. -type DeleteReplicationGroupMemberAction struct { - _ struct{} `type:"structure"` - - // The Region where the replica exists. - // - // RegionName is a required field - RegionName *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteReplicationGroupMemberAction) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteReplicationGroupMemberAction) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteReplicationGroupMemberAction) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteReplicationGroupMemberAction"} - if s.RegionName == nil { - invalidParams.Add(request.NewErrParamRequired("RegionName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetRegionName sets the RegionName field's value. -func (s *DeleteReplicationGroupMemberAction) SetRegionName(v string) *DeleteReplicationGroupMemberAction { - s.RegionName = &v - return s -} - -// Represents a request to perform a DeleteItem operation on an item. -type DeleteRequest struct { - _ struct{} `type:"structure"` - - // A map of attribute name to attribute values, representing the primary key - // of the item to delete. All of the table's primary key attributes must be - // specified, and their data types must match those of the table's key schema. - // - // Key is a required field - Key map[string]*AttributeValue `type:"map" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteRequest) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteRequest) GoString() string { - return s.String() -} - -// SetKey sets the Key field's value. 
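DeleteRequest, by contrast, is the BatchWriteItem flavor: a key only, with no condition expressions or return values. A sketch, assuming the BatchWriteItem entry point; the table name, key attribute, and helper name are illustrative:

func batchDelete(svc *dynamodb.DynamoDB, keys []string) error {
	// BatchWriteItem accepts at most 25 put/delete requests per call.
	reqs := make([]*dynamodb.WriteRequest, 0, len(keys))
	for _, k := range keys {
		reqs = append(reqs, &dynamodb.WriteRequest{
			DeleteRequest: &dynamodb.DeleteRequest{
				Key: map[string]*dynamodb.AttributeValue{"pk": {S: aws.String(k)}},
			},
		})
	}
	out, err := svc.BatchWriteItem(&dynamodb.BatchWriteItemInput{
		RequestItems: map[string][]*dynamodb.WriteRequest{"example-table": reqs},
	})
	if err != nil {
		return err
	}
	// Whatever the service could not process comes back in UnprocessedItems
	// and should be resubmitted by the caller, normally with backoff.
	if len(out.UnprocessedItems) > 0 {
		return fmt.Errorf("%d table(s) still have unprocessed deletes", len(out.UnprocessedItems))
	}
	return nil
}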
-func (s *DeleteRequest) SetKey(v map[string]*AttributeValue) *DeleteRequest { - s.Key = v - return s -} - -type DeleteResourcePolicyInput struct { - _ struct{} `type:"structure"` - - // A string value that you can use to conditionally delete your policy. When - // you provide an expected revision ID, if the revision ID of the existing policy - // on the resource doesn't match or if there's no policy attached to the resource, - // the request will fail and return a PolicyNotFoundException. - ExpectedRevisionId *string `min:"1" type:"string"` - - // The Amazon Resource Name (ARN) of the DynamoDB resource from which the policy - // will be removed. The resources you can specify include tables and streams. - // If you remove the policy of a table, it will also remove the permissions - // for the table's indexes defined in that policy document. This is because - // index permissions are defined in the table's policy. - // - // ResourceArn is a required field - ResourceArn *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteResourcePolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteResourcePolicyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteResourcePolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteResourcePolicyInput"} - if s.ExpectedRevisionId != nil && len(*s.ExpectedRevisionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ExpectedRevisionId", 1)) - } - if s.ResourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceArn")) - } - if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetExpectedRevisionId sets the ExpectedRevisionId field's value. -func (s *DeleteResourcePolicyInput) SetExpectedRevisionId(v string) *DeleteResourcePolicyInput { - s.ExpectedRevisionId = &v - return s -} - -// SetResourceArn sets the ResourceArn field's value. -func (s *DeleteResourcePolicyInput) SetResourceArn(v string) *DeleteResourcePolicyInput { - s.ResourceArn = &v - return s -} - -type DeleteResourcePolicyOutput struct { - _ struct{} `type:"structure"` - - // A unique string that represents the revision ID of the policy. If you're - // comparing revision IDs, make sure to always use string comparison logic. - // - // This value will be empty if you make a request against a resource without - // a policy. - RevisionId *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteResourcePolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteResourcePolicyOutput) GoString() string { - return s.String() -} - -// SetRevisionId sets the RevisionId field's value. -func (s *DeleteResourcePolicyOutput) SetRevisionId(v string) *DeleteResourcePolicyOutput { - s.RevisionId = &v - return s -} - -// Represents the input of a DeleteTable operation. -type DeleteTableInput struct { - _ struct{} `type:"structure"` - - // The name of the table to delete. You can also provide the Amazon Resource - // Name (ARN) of the table in this parameter. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteTableInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteTableInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteTableInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteTableInput"} - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetTableName sets the TableName field's value. -func (s *DeleteTableInput) SetTableName(v string) *DeleteTableInput { - s.TableName = &v - return s -} - -// Represents the output of a DeleteTable operation. -type DeleteTableOutput struct { - _ struct{} `type:"structure"` - - // Represents the properties of a table. - TableDescription *TableDescription `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteTableOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteTableOutput) GoString() string { - return s.String() -} - -// SetTableDescription sets the TableDescription field's value. -func (s *DeleteTableOutput) SetTableDescription(v *TableDescription) *DeleteTableOutput { - s.TableDescription = v - return s -} - -type DescribeBackupInput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) associated with the backup. - // - // BackupArn is a required field - BackupArn *string `min:"37" type:"string" required:"true"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeBackupInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeBackupInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeBackupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeBackupInput"} - if s.BackupArn == nil { - invalidParams.Add(request.NewErrParamRequired("BackupArn")) - } - if s.BackupArn != nil && len(*s.BackupArn) < 37 { - invalidParams.Add(request.NewErrParamMinLen("BackupArn", 37)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBackupArn sets the BackupArn field's value. -func (s *DescribeBackupInput) SetBackupArn(v string) *DescribeBackupInput { - s.BackupArn = &v - return s -} - -type DescribeBackupOutput struct { - _ struct{} `type:"structure"` - - // Contains the description of the backup created for the table. - BackupDescription *BackupDescription `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeBackupOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeBackupOutput) GoString() string { - return s.String() -} - -// SetBackupDescription sets the BackupDescription field's value. -func (s *DescribeBackupOutput) SetBackupDescription(v *BackupDescription) *DescribeBackupOutput { - s.BackupDescription = v - return s -} - -type DescribeContinuousBackupsInput struct { - _ struct{} `type:"structure"` - - // Name of the table for which the customer wants to check the continuous backups - // and point in time recovery settings. - // - // You can also provide the Amazon Resource Name (ARN) of the table in this - // parameter. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeContinuousBackupsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeContinuousBackupsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
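As a reviewer aid for the Describe* family being removed here: checking whether point-in-time recovery is on via DescribeContinuousBackups. A sketch; the helper name is illustrative:

func pitrEnabled(svc *dynamodb.DynamoDB, table string) (bool, error) {
	out, err := svc.DescribeContinuousBackups(&dynamodb.DescribeContinuousBackupsInput{
		TableName: aws.String(table),
	})
	if err != nil {
		return false, err
	}
	d := out.ContinuousBackupsDescription
	if d == nil || d.PointInTimeRecoveryDescription == nil {
		return false, nil
	}
	return aws.StringValue(d.PointInTimeRecoveryDescription.PointInTimeRecoveryStatus) ==
		dynamodb.PointInTimeRecoveryStatusEnabled, nil
}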
-func (s *DescribeContinuousBackupsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeContinuousBackupsInput"} - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetTableName sets the TableName field's value. -func (s *DescribeContinuousBackupsInput) SetTableName(v string) *DescribeContinuousBackupsInput { - s.TableName = &v - return s -} - -type DescribeContinuousBackupsOutput struct { - _ struct{} `type:"structure"` - - // Represents the continuous backups and point in time recovery settings on - // the table. - ContinuousBackupsDescription *ContinuousBackupsDescription `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeContinuousBackupsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeContinuousBackupsOutput) GoString() string { - return s.String() -} - -// SetContinuousBackupsDescription sets the ContinuousBackupsDescription field's value. -func (s *DescribeContinuousBackupsOutput) SetContinuousBackupsDescription(v *ContinuousBackupsDescription) *DescribeContinuousBackupsOutput { - s.ContinuousBackupsDescription = v - return s -} - -type DescribeContributorInsightsInput struct { - _ struct{} `type:"structure"` - - // The name of the global secondary index to describe, if applicable. - IndexName *string `min:"3" type:"string"` - - // The name of the table to describe. You can also provide the Amazon Resource - // Name (ARN) of the table in this parameter. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeContributorInsightsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeContributorInsightsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeContributorInsightsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeContributorInsightsInput"} - if s.IndexName != nil && len(*s.IndexName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetIndexName sets the IndexName field's value. -func (s *DescribeContributorInsightsInput) SetIndexName(v string) *DescribeContributorInsightsInput { - s.IndexName = &v - return s -} - -// SetTableName sets the TableName field's value. -func (s *DescribeContributorInsightsInput) SetTableName(v string) *DescribeContributorInsightsInput { - s.TableName = &v - return s -} - -type DescribeContributorInsightsOutput struct { - _ struct{} `type:"structure"` - - // List of names of the associated contributor insights rules. - ContributorInsightsRuleList []*string `type:"list"` - - // Current status of contributor insights. - ContributorInsightsStatus *string `type:"string" enum:"ContributorInsightsStatus"` - - // Returns information about the last failure that was encountered. - // - // The most common exceptions for a FAILED status are: - // - // * LimitExceededException - Per-account Amazon CloudWatch Contributor Insights - // rule limit reached. Please disable Contributor Insights for other tables/indexes - // OR disable Contributor Insights rules before retrying. - // - // * AccessDeniedException - Amazon CloudWatch Contributor Insights rules - // cannot be modified due to insufficient permissions. - // - // * AccessDeniedException - Failed to create service-linked role for Contributor - // Insights due to insufficient permissions. - // - // * InternalServerError - Failed to create Amazon CloudWatch Contributor - // Insights rules. Please retry request. - FailureException *FailureException `type:"structure"` - - // The name of the global secondary index being described. - IndexName *string `min:"3" type:"string"` - - // Timestamp of the last time the status was changed. - LastUpdateDateTime *time.Time `type:"timestamp"` - - // The name of the table being described. - TableName *string `min:"3" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeContributorInsightsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeContributorInsightsOutput) GoString() string { - return s.String() -} - -// SetContributorInsightsRuleList sets the ContributorInsightsRuleList field's value. -func (s *DescribeContributorInsightsOutput) SetContributorInsightsRuleList(v []*string) *DescribeContributorInsightsOutput { - s.ContributorInsightsRuleList = v - return s -} - -// SetContributorInsightsStatus sets the ContributorInsightsStatus field's value. 
-func (s *DescribeContributorInsightsOutput) SetContributorInsightsStatus(v string) *DescribeContributorInsightsOutput { - s.ContributorInsightsStatus = &v - return s -} - -// SetFailureException sets the FailureException field's value. -func (s *DescribeContributorInsightsOutput) SetFailureException(v *FailureException) *DescribeContributorInsightsOutput { - s.FailureException = v - return s -} - -// SetIndexName sets the IndexName field's value. -func (s *DescribeContributorInsightsOutput) SetIndexName(v string) *DescribeContributorInsightsOutput { - s.IndexName = &v - return s -} - -// SetLastUpdateDateTime sets the LastUpdateDateTime field's value. -func (s *DescribeContributorInsightsOutput) SetLastUpdateDateTime(v time.Time) *DescribeContributorInsightsOutput { - s.LastUpdateDateTime = &v - return s -} - -// SetTableName sets the TableName field's value. -func (s *DescribeContributorInsightsOutput) SetTableName(v string) *DescribeContributorInsightsOutput { - s.TableName = &v - return s -} - -type DescribeEndpointsInput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeEndpointsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeEndpointsInput) GoString() string { - return s.String() -} - -type DescribeEndpointsOutput struct { - _ struct{} `type:"structure"` - - // List of endpoints. - // - // Endpoints is a required field - Endpoints []*Endpoint `type:"list" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeEndpointsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeEndpointsOutput) GoString() string { - return s.String() -} - -// SetEndpoints sets the Endpoints field's value. -func (s *DescribeEndpointsOutput) SetEndpoints(v []*Endpoint) *DescribeEndpointsOutput { - s.Endpoints = v - return s -} - -type DescribeExportInput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) associated with the export. - // - // ExportArn is a required field - ExportArn *string `min:"37" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeExportInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
-// The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeExportInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DescribeExportInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DescribeExportInput"}
- if s.ExportArn == nil {
- invalidParams.Add(request.NewErrParamRequired("ExportArn"))
- }
- if s.ExportArn != nil && len(*s.ExportArn) < 37 {
- invalidParams.Add(request.NewErrParamMinLen("ExportArn", 37))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetExportArn sets the ExportArn field's value.
-func (s *DescribeExportInput) SetExportArn(v string) *DescribeExportInput {
- s.ExportArn = &v
- return s
-}
-
-type DescribeExportOutput struct {
- _ struct{} `type:"structure"`
-
- // Represents the properties of the export.
- ExportDescription *ExportDescription `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeExportOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeExportOutput) GoString() string {
- return s.String()
-}
-
-// SetExportDescription sets the ExportDescription field's value.
-func (s *DescribeExportOutput) SetExportDescription(v *ExportDescription) *DescribeExportOutput {
- s.ExportDescription = v
- return s
-}
-
-type DescribeGlobalTableInput struct {
- _ struct{} `type:"structure"`
-
- // The name of the global table.
- //
- // GlobalTableName is a required field
- GlobalTableName *string `min:"3" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeGlobalTableInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeGlobalTableInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DescribeGlobalTableInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DescribeGlobalTableInput"}
- if s.GlobalTableName == nil {
- invalidParams.Add(request.NewErrParamRequired("GlobalTableName"))
- }
- if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetGlobalTableName sets the GlobalTableName field's value.
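DescribeExport is typically polled until the export settles. A sketch assuming the generated ExportStatus constants and time/fmt imports; the helper name and fixed sleep are illustrative (a production caller would add backoff and a deadline):

func waitForExport(svc *dynamodb.DynamoDB, exportArn string) error {
	for {
		out, err := svc.DescribeExport(&dynamodb.DescribeExportInput{
			ExportArn: aws.String(exportArn),
		})
		if err != nil {
			return err
		}
		switch aws.StringValue(out.ExportDescription.ExportStatus) {
		case dynamodb.ExportStatusCompleted:
			return nil
		case dynamodb.ExportStatusFailed:
			return fmt.Errorf("export failed: %s", aws.StringValue(out.ExportDescription.FailureMessage))
		}
		time.Sleep(30 * time.Second) // still IN_PROGRESS
	}
}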
-func (s *DescribeGlobalTableInput) SetGlobalTableName(v string) *DescribeGlobalTableInput { - s.GlobalTableName = &v - return s -} - -type DescribeGlobalTableOutput struct { - _ struct{} `type:"structure"` - - // Contains the details of the global table. - GlobalTableDescription *GlobalTableDescription `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeGlobalTableOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeGlobalTableOutput) GoString() string { - return s.String() -} - -// SetGlobalTableDescription sets the GlobalTableDescription field's value. -func (s *DescribeGlobalTableOutput) SetGlobalTableDescription(v *GlobalTableDescription) *DescribeGlobalTableOutput { - s.GlobalTableDescription = v - return s -} - -type DescribeGlobalTableSettingsInput struct { - _ struct{} `type:"structure"` - - // The name of the global table to describe. - // - // GlobalTableName is a required field - GlobalTableName *string `min:"3" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeGlobalTableSettingsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeGlobalTableSettingsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeGlobalTableSettingsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeGlobalTableSettingsInput"} - if s.GlobalTableName == nil { - invalidParams.Add(request.NewErrParamRequired("GlobalTableName")) - } - if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGlobalTableName sets the GlobalTableName field's value. -func (s *DescribeGlobalTableSettingsInput) SetGlobalTableName(v string) *DescribeGlobalTableSettingsInput { - s.GlobalTableName = &v - return s -} - -type DescribeGlobalTableSettingsOutput struct { - _ struct{} `type:"structure"` - - // The name of the global table. - GlobalTableName *string `min:"3" type:"string"` - - // The Region-specific settings for the global table. - ReplicaSettings []*ReplicaSettingsDescription `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
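And, for the global-table output documented above, a sketch of listing replica Regions (helper name illustrative):

func replicaRegions(svc *dynamodb.DynamoDB, name string) ([]string, error) {
	out, err := svc.DescribeGlobalTable(&dynamodb.DescribeGlobalTableInput{
		GlobalTableName: aws.String(name),
	})
	if err != nil {
		return nil, err
	}
	var regions []string
	for _, r := range out.GlobalTableDescription.ReplicationGroup {
		regions = append(regions, aws.StringValue(r.RegionName))
	}
	return regions, nil
}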
-func (s DescribeGlobalTableSettingsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeGlobalTableSettingsOutput) GoString() string { - return s.String() -} - -// SetGlobalTableName sets the GlobalTableName field's value. -func (s *DescribeGlobalTableSettingsOutput) SetGlobalTableName(v string) *DescribeGlobalTableSettingsOutput { - s.GlobalTableName = &v - return s -} - -// SetReplicaSettings sets the ReplicaSettings field's value. -func (s *DescribeGlobalTableSettingsOutput) SetReplicaSettings(v []*ReplicaSettingsDescription) *DescribeGlobalTableSettingsOutput { - s.ReplicaSettings = v - return s -} - -type DescribeImportInput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) associated with the table you're importing - // to. - // - // ImportArn is a required field - ImportArn *string `min:"37" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeImportInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeImportInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeImportInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeImportInput"} - if s.ImportArn == nil { - invalidParams.Add(request.NewErrParamRequired("ImportArn")) - } - if s.ImportArn != nil && len(*s.ImportArn) < 37 { - invalidParams.Add(request.NewErrParamMinLen("ImportArn", 37)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetImportArn sets the ImportArn field's value. -func (s *DescribeImportInput) SetImportArn(v string) *DescribeImportInput { - s.ImportArn = &v - return s -} - -type DescribeImportOutput struct { - _ struct{} `type:"structure"` - - // Represents the properties of the table created for the import, and parameters - // of the import. The import parameters include import status, how many items - // were processed, and how many errors were encountered. - // - // ImportTableDescription is a required field - ImportTableDescription *ImportTableDescription `type:"structure" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeImportOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
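DescribeImportInput carries the same client-side validation pattern as the export types above. A small sketch showing that Validate() fires before any request is made (the ARN here is deliberately invalid and hypothetical):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	// Deliberately too short: ImportArn carries min:"37" in its struct tag.
	in := &dynamodb.DescribeImportInput{ImportArn: aws.String("arn:invalid")}

	// Validate runs entirely client-side, so the bad ARN is caught
	// without any credentials or network access.
	if err := in.Validate(); err != nil {
		fmt.Println(err)
	}
}
```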
-func (s DescribeImportOutput) GoString() string { - return s.String() -} - -// SetImportTableDescription sets the ImportTableDescription field's value. -func (s *DescribeImportOutput) SetImportTableDescription(v *ImportTableDescription) *DescribeImportOutput { - s.ImportTableDescription = v - return s -} - -type DescribeKinesisStreamingDestinationInput struct { - _ struct{} `type:"structure"` - - // The name of the table being described. You can also provide the Amazon Resource - // Name (ARN) of the table in this parameter. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeKinesisStreamingDestinationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeKinesisStreamingDestinationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeKinesisStreamingDestinationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeKinesisStreamingDestinationInput"} - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetTableName sets the TableName field's value. -func (s *DescribeKinesisStreamingDestinationInput) SetTableName(v string) *DescribeKinesisStreamingDestinationInput { - s.TableName = &v - return s -} - -type DescribeKinesisStreamingDestinationOutput struct { - _ struct{} `type:"structure"` - - // The list of replica structures for the table being described. - KinesisDataStreamDestinations []*KinesisDataStreamDestination `type:"list"` - - // The name of the table being described. - TableName *string `min:"3" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeKinesisStreamingDestinationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeKinesisStreamingDestinationOutput) GoString() string { - return s.String() -} - -// SetKinesisDataStreamDestinations sets the KinesisDataStreamDestinations field's value. -func (s *DescribeKinesisStreamingDestinationOutput) SetKinesisDataStreamDestinations(v []*KinesisDataStreamDestination) *DescribeKinesisStreamingDestinationOutput { - s.KinesisDataStreamDestinations = v - return s -} - -// SetTableName sets the TableName field's value. 
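For the Kinesis-destination describe call being removed above, a minimal sketch under the same assumptions (placeholder table name, stock v1 session):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// "demo-table" is a placeholder; a table ARN is also accepted here.
	out, err := svc.DescribeKinesisStreamingDestination(&dynamodb.DescribeKinesisStreamingDestinationInput{
		TableName: aws.String("demo-table"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// One entry per configured Kinesis data stream destination.
	for _, d := range out.KinesisDataStreamDestinations {
		fmt.Println(aws.StringValue(d.StreamArn), aws.StringValue(d.DestinationStatus))
	}
}
```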
-func (s *DescribeKinesisStreamingDestinationOutput) SetTableName(v string) *DescribeKinesisStreamingDestinationOutput { - s.TableName = &v - return s -} - -// Represents the input of a DescribeLimits operation. Has no content. -type DescribeLimitsInput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeLimitsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeLimitsInput) GoString() string { - return s.String() -} - -// Represents the output of a DescribeLimits operation. -type DescribeLimitsOutput struct { - _ struct{} `type:"structure"` - - // The maximum total read capacity units that your account allows you to provision - // across all of your tables in this Region. - AccountMaxReadCapacityUnits *int64 `min:"1" type:"long"` - - // The maximum total write capacity units that your account allows you to provision - // across all of your tables in this Region. - AccountMaxWriteCapacityUnits *int64 `min:"1" type:"long"` - - // The maximum read capacity units that your account allows you to provision - // for a new table that you are creating in this Region, including the read - // capacity units provisioned for its global secondary indexes (GSIs). - TableMaxReadCapacityUnits *int64 `min:"1" type:"long"` - - // The maximum write capacity units that your account allows you to provision - // for a new table that you are creating in this Region, including the write - // capacity units provisioned for its global secondary indexes (GSIs). - TableMaxWriteCapacityUnits *int64 `min:"1" type:"long"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeLimitsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeLimitsOutput) GoString() string { - return s.String() -} - -// SetAccountMaxReadCapacityUnits sets the AccountMaxReadCapacityUnits field's value. -func (s *DescribeLimitsOutput) SetAccountMaxReadCapacityUnits(v int64) *DescribeLimitsOutput { - s.AccountMaxReadCapacityUnits = &v - return s -} - -// SetAccountMaxWriteCapacityUnits sets the AccountMaxWriteCapacityUnits field's value. -func (s *DescribeLimitsOutput) SetAccountMaxWriteCapacityUnits(v int64) *DescribeLimitsOutput { - s.AccountMaxWriteCapacityUnits = &v - return s -} - -// SetTableMaxReadCapacityUnits sets the TableMaxReadCapacityUnits field's value. -func (s *DescribeLimitsOutput) SetTableMaxReadCapacityUnits(v int64) *DescribeLimitsOutput { - s.TableMaxReadCapacityUnits = &v - return s -} - -// SetTableMaxWriteCapacityUnits sets the TableMaxWriteCapacityUnits field's value. 
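DescribeLimits takes an empty input and returns the four account/table ceilings documented above. A short sketch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// DescribeLimitsInput has no fields, so an empty value is the whole request.
	out, err := svc.DescribeLimits(&dynamodb.DescribeLimitsInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("account: %d RCU / %d WCU, per table: %d RCU / %d WCU\n",
		aws.Int64Value(out.AccountMaxReadCapacityUnits),
		aws.Int64Value(out.AccountMaxWriteCapacityUnits),
		aws.Int64Value(out.TableMaxReadCapacityUnits),
		aws.Int64Value(out.TableMaxWriteCapacityUnits))
}
```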
-func (s *DescribeLimitsOutput) SetTableMaxWriteCapacityUnits(v int64) *DescribeLimitsOutput { - s.TableMaxWriteCapacityUnits = &v - return s -} - -// Represents the input of a DescribeTable operation. -type DescribeTableInput struct { - _ struct{} `type:"structure"` - - // The name of the table to describe. You can also provide the Amazon Resource - // Name (ARN) of the table in this parameter. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeTableInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeTableInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeTableInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeTableInput"} - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetTableName sets the TableName field's value. -func (s *DescribeTableInput) SetTableName(v string) *DescribeTableInput { - s.TableName = &v - return s -} - -// Represents the output of a DescribeTable operation. -type DescribeTableOutput struct { - _ struct{} `type:"structure"` - - // The properties of the table. - Table *TableDescription `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeTableOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeTableOutput) GoString() string { - return s.String() -} - -// SetTable sets the Table field's value. -func (s *DescribeTableOutput) SetTable(v *TableDescription) *DescribeTableOutput { - s.Table = v - return s -} - -type DescribeTableReplicaAutoScalingInput struct { - _ struct{} `type:"structure"` - - // The name of the table. You can also provide the Amazon Resource Name (ARN) - // of the table in this parameter. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
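A matching sketch for DescribeTable, the most common of these describe calls; the table name is a placeholder, and a table ARN would work equally:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.DescribeTable(&dynamodb.DescribeTableInput{
		TableName: aws.String("demo-table"), // placeholder name or table ARN
	})
	if err != nil {
		log.Fatal(err)
	}
	// Table holds the full TableDescription; status and item count shown here.
	fmt.Println(aws.StringValue(out.Table.TableStatus),
		aws.Int64Value(out.Table.ItemCount))
}
```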
-func (s DescribeTableReplicaAutoScalingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeTableReplicaAutoScalingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeTableReplicaAutoScalingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeTableReplicaAutoScalingInput"} - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetTableName sets the TableName field's value. -func (s *DescribeTableReplicaAutoScalingInput) SetTableName(v string) *DescribeTableReplicaAutoScalingInput { - s.TableName = &v - return s -} - -type DescribeTableReplicaAutoScalingOutput struct { - _ struct{} `type:"structure"` - - // Represents the auto scaling properties of the table. - TableAutoScalingDescription *TableAutoScalingDescription `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeTableReplicaAutoScalingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeTableReplicaAutoScalingOutput) GoString() string { - return s.String() -} - -// SetTableAutoScalingDescription sets the TableAutoScalingDescription field's value. -func (s *DescribeTableReplicaAutoScalingOutput) SetTableAutoScalingDescription(v *TableAutoScalingDescription) *DescribeTableReplicaAutoScalingOutput { - s.TableAutoScalingDescription = v - return s -} - -type DescribeTimeToLiveInput struct { - _ struct{} `type:"structure"` - - // The name of the table to be described. You can also provide the Amazon Resource - // Name (ARN) of the table in this parameter. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeTimeToLiveInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeTimeToLiveInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeTimeToLiveInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "DescribeTimeToLiveInput"}
-	if s.TableName == nil {
-		invalidParams.Add(request.NewErrParamRequired("TableName"))
-	}
-	if s.TableName != nil && len(*s.TableName) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetTableName sets the TableName field's value.
-func (s *DescribeTimeToLiveInput) SetTableName(v string) *DescribeTimeToLiveInput {
-	s.TableName = &v
-	return s
-}
-
-type DescribeTimeToLiveOutput struct {
-	_ struct{} `type:"structure"`
-
-	// The description of the Time to Live (TTL) status on the specified table.
-	TimeToLiveDescription *TimeToLiveDescription `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeTimeToLiveOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeTimeToLiveOutput) GoString() string {
-	return s.String()
-}
-
-// SetTimeToLiveDescription sets the TimeToLiveDescription field's value.
-func (s *DescribeTimeToLiveOutput) SetTimeToLiveDescription(v *TimeToLiveDescription) *DescribeTimeToLiveOutput {
-	s.TimeToLiveDescription = v
-	return s
-}
-
-type DisableKinesisStreamingDestinationInput struct {
-	_ struct{} `type:"structure"`
-
-	// The source for the Kinesis streaming information that is being disabled.
-	EnableKinesisStreamingConfiguration *EnableKinesisStreamingConfiguration `type:"structure"`
-
-	// The ARN for a Kinesis data stream.
-	//
-	// StreamArn is a required field
-	StreamArn *string `min:"37" type:"string" required:"true"`
-
-	// The name of the DynamoDB table. You can also provide the Amazon Resource
-	// Name (ARN) of the table in this parameter.
-	//
-	// TableName is a required field
-	TableName *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DisableKinesisStreamingDestinationInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DisableKinesisStreamingDestinationInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
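And the TTL variant, same assumptions (placeholder table name, stock v1 session):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.DescribeTimeToLive(&dynamodb.DescribeTimeToLiveInput{
		TableName: aws.String("demo-table"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	// TimeToLiveStatus tells you whether TTL is enabled and which
	// attribute holds the expiry timestamp.
	ttl := out.TimeToLiveDescription
	fmt.Println(aws.StringValue(ttl.TimeToLiveStatus), aws.StringValue(ttl.AttributeName))
}
```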
-func (s *DisableKinesisStreamingDestinationInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "DisableKinesisStreamingDestinationInput"}
-	if s.StreamArn == nil {
-		invalidParams.Add(request.NewErrParamRequired("StreamArn"))
-	}
-	if s.StreamArn != nil && len(*s.StreamArn) < 37 {
-		invalidParams.Add(request.NewErrParamMinLen("StreamArn", 37))
-	}
-	if s.TableName == nil {
-		invalidParams.Add(request.NewErrParamRequired("TableName"))
-	}
-	if s.TableName != nil && len(*s.TableName) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetEnableKinesisStreamingConfiguration sets the EnableKinesisStreamingConfiguration field's value.
-func (s *DisableKinesisStreamingDestinationInput) SetEnableKinesisStreamingConfiguration(v *EnableKinesisStreamingConfiguration) *DisableKinesisStreamingDestinationInput {
-	s.EnableKinesisStreamingConfiguration = v
-	return s
-}
-
-// SetStreamArn sets the StreamArn field's value.
-func (s *DisableKinesisStreamingDestinationInput) SetStreamArn(v string) *DisableKinesisStreamingDestinationInput {
-	s.StreamArn = &v
-	return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *DisableKinesisStreamingDestinationInput) SetTableName(v string) *DisableKinesisStreamingDestinationInput {
-	s.TableName = &v
-	return s
-}
-
-type DisableKinesisStreamingDestinationOutput struct {
-	_ struct{} `type:"structure"`
-
-	// The current status of the replication.
-	DestinationStatus *string `type:"string" enum:"DestinationStatus"`
-
-	// The destination for the Kinesis streaming information that is being disabled.
-	EnableKinesisStreamingConfiguration *EnableKinesisStreamingConfiguration `type:"structure"`
-
-	// The ARN for the specific Kinesis data stream.
-	StreamArn *string `min:"37" type:"string"`
-
-	// The name of the table being modified.
-	TableName *string `min:"3" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DisableKinesisStreamingDestinationOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DisableKinesisStreamingDestinationOutput) GoString() string {
-	return s.String()
-}
-
-// SetDestinationStatus sets the DestinationStatus field's value.
-func (s *DisableKinesisStreamingDestinationOutput) SetDestinationStatus(v string) *DisableKinesisStreamingDestinationOutput {
-	s.DestinationStatus = &v
-	return s
-}
-
-// SetEnableKinesisStreamingConfiguration sets the EnableKinesisStreamingConfiguration field's value.
-func (s *DisableKinesisStreamingDestinationOutput) SetEnableKinesisStreamingConfiguration(v *EnableKinesisStreamingConfiguration) *DisableKinesisStreamingDestinationOutput {
-	s.EnableKinesisStreamingConfiguration = v
-	return s
-}
-
-// SetStreamArn sets the StreamArn field's value.
-func (s *DisableKinesisStreamingDestinationOutput) SetStreamArn(v string) *DisableKinesisStreamingDestinationOutput {
-	s.StreamArn = &v
-	return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *DisableKinesisStreamingDestinationOutput) SetTableName(v string) *DisableKinesisStreamingDestinationOutput { - s.TableName = &v - return s -} - -// There was an attempt to insert an item with the same primary key as an item -// that already exists in the DynamoDB table. -type DuplicateItemException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DuplicateItemException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DuplicateItemException) GoString() string { - return s.String() -} - -func newErrorDuplicateItemException(v protocol.ResponseMetadata) error { - return &DuplicateItemException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *DuplicateItemException) Code() string { - return "DuplicateItemException" -} - -// Message returns the exception's message. -func (s *DuplicateItemException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *DuplicateItemException) OrigErr() error { - return nil -} - -func (s *DuplicateItemException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *DuplicateItemException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *DuplicateItemException) RequestID() string { - return s.RespMetadata.RequestID -} - -// Enables setting the configuration for Kinesis Streaming. -type EnableKinesisStreamingConfiguration struct { - _ struct{} `type:"structure"` - - // Toggle for the precision of Kinesis data stream timestamp. The values are - // either MILLISECOND or MICROSECOND. - ApproximateCreationDateTimePrecision *string `type:"string" enum:"ApproximateCreationDateTimePrecision"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EnableKinesisStreamingConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EnableKinesisStreamingConfiguration) GoString() string { - return s.String() -} - -// SetApproximateCreationDateTimePrecision sets the ApproximateCreationDateTimePrecision field's value. 
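The hunks above also delete DuplicateItemException and its awserr plumbing. A sketch of the usual v1 classification pattern, switching on the generated ErrCode* constants; the helper name is ours, not the SDK's:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// classify unwraps a service error to awserr.Error and switches on Code(),
// which matches the ErrCode* constants generated alongside these types.
func classify(err error) {
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case dynamodb.ErrCodeDuplicateItemException:
			fmt.Println("primary key already present:", aerr.Message())
		case dynamodb.ErrCodeConditionalCheckFailedException:
			fmt.Println("condition failed:", aerr.Message())
		default:
			fmt.Println("other service error:", aerr.Code())
		}
	}
}

func main() {
	// Synthetic error, so the sketch runs without any AWS calls.
	classify(awserr.New(dynamodb.ErrCodeDuplicateItemException, "duplicate key", nil))
}
```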
-func (s *EnableKinesisStreamingConfiguration) SetApproximateCreationDateTimePrecision(v string) *EnableKinesisStreamingConfiguration { - s.ApproximateCreationDateTimePrecision = &v - return s -} - -type EnableKinesisStreamingDestinationInput struct { - _ struct{} `type:"structure"` - - // The source for the Kinesis streaming information that is being enabled. - EnableKinesisStreamingConfiguration *EnableKinesisStreamingConfiguration `type:"structure"` - - // The ARN for a Kinesis data stream. - // - // StreamArn is a required field - StreamArn *string `min:"37" type:"string" required:"true"` - - // The name of the DynamoDB table. You can also provide the Amazon Resource - // Name (ARN) of the table in this parameter. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EnableKinesisStreamingDestinationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EnableKinesisStreamingDestinationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *EnableKinesisStreamingDestinationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "EnableKinesisStreamingDestinationInput"} - if s.StreamArn == nil { - invalidParams.Add(request.NewErrParamRequired("StreamArn")) - } - if s.StreamArn != nil && len(*s.StreamArn) < 37 { - invalidParams.Add(request.NewErrParamMinLen("StreamArn", 37)) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEnableKinesisStreamingConfiguration sets the EnableKinesisStreamingConfiguration field's value. -func (s *EnableKinesisStreamingDestinationInput) SetEnableKinesisStreamingConfiguration(v *EnableKinesisStreamingConfiguration) *EnableKinesisStreamingDestinationInput { - s.EnableKinesisStreamingConfiguration = v - return s -} - -// SetStreamArn sets the StreamArn field's value. -func (s *EnableKinesisStreamingDestinationInput) SetStreamArn(v string) *EnableKinesisStreamingDestinationInput { - s.StreamArn = &v - return s -} - -// SetTableName sets the TableName field's value. -func (s *EnableKinesisStreamingDestinationInput) SetTableName(v string) *EnableKinesisStreamingDestinationInput { - s.TableName = &v - return s -} - -type EnableKinesisStreamingDestinationOutput struct { - _ struct{} `type:"structure"` - - // The current status of the replication. - DestinationStatus *string `type:"string" enum:"DestinationStatus"` - - // The destination for the Kinesis streaming information that is being enabled. - EnableKinesisStreamingConfiguration *EnableKinesisStreamingConfiguration `type:"structure"` - - // The ARN for the specific Kinesis data stream. - StreamArn *string `min:"37" type:"string"` - - // The name of the table being modified. 
-	TableName *string `min:"3" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s EnableKinesisStreamingDestinationOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s EnableKinesisStreamingDestinationOutput) GoString() string {
-	return s.String()
-}
-
-// SetDestinationStatus sets the DestinationStatus field's value.
-func (s *EnableKinesisStreamingDestinationOutput) SetDestinationStatus(v string) *EnableKinesisStreamingDestinationOutput {
-	s.DestinationStatus = &v
-	return s
-}
-
-// SetEnableKinesisStreamingConfiguration sets the EnableKinesisStreamingConfiguration field's value.
-func (s *EnableKinesisStreamingDestinationOutput) SetEnableKinesisStreamingConfiguration(v *EnableKinesisStreamingConfiguration) *EnableKinesisStreamingDestinationOutput {
-	s.EnableKinesisStreamingConfiguration = v
-	return s
-}
-
-// SetStreamArn sets the StreamArn field's value.
-func (s *EnableKinesisStreamingDestinationOutput) SetStreamArn(v string) *EnableKinesisStreamingDestinationOutput {
-	s.StreamArn = &v
-	return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *EnableKinesisStreamingDestinationOutput) SetTableName(v string) *EnableKinesisStreamingDestinationOutput {
-	s.TableName = &v
-	return s
-}
-
-// Details about an endpoint.
-type Endpoint struct {
-	_ struct{} `type:"structure"`
-
-	// IP address of the endpoint.
-	//
-	// Address is a required field
-	Address *string `type:"string" required:"true"`
-
-	// Endpoint cache time to live (TTL) value.
-	//
-	// CachePeriodInMinutes is a required field
-	CachePeriodInMinutes *int64 `type:"long" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Endpoint) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Endpoint) GoString() string {
-	return s.String()
-}
-
-// SetAddress sets the Address field's value.
-func (s *Endpoint) SetAddress(v string) *Endpoint {
-	s.Address = &v
-	return s
-}
-
-// SetCachePeriodInMinutes sets the CachePeriodInMinutes field's value.
-func (s *Endpoint) SetCachePeriodInMinutes(v int64) *Endpoint {
-	s.CachePeriodInMinutes = &v
-	return s
-}
-
-type ExecuteStatementInput struct {
-	_ struct{} `type:"structure"`
-
-	// The consistency of a read operation. If set to true, then a strongly consistent
-	// read is used; otherwise, an eventually consistent read is used.
-	ConsistentRead *bool `type:"boolean"`
-
-	// The maximum number of items to evaluate (not necessarily the number of matching
-	// items).
If DynamoDB processes the number of items up to the limit while processing - // the results, it stops the operation and returns the matching values up to - // that point, along with a key in LastEvaluatedKey to apply in a subsequent - // operation so you can pick up where you left off. Also, if the processed dataset - // size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation - // and returns the matching values up to the limit, and a key in LastEvaluatedKey - // to apply in a subsequent operation to continue the operation. - Limit *int64 `min:"1" type:"integer"` - - // Set this value to get remaining results, if NextToken was returned in the - // statement response. - NextToken *string `min:"1" type:"string"` - - // The parameters for the PartiQL statement, if any. - Parameters []*AttributeValue `min:"1" type:"list"` - - // Determines the level of detail about either provisioned or on-demand throughput - // consumption that is returned in the response: - // - // * INDEXES - The response includes the aggregate ConsumedCapacity for the - // operation, together with ConsumedCapacity for each table and secondary - // index that was accessed. Note that some operations, such as GetItem and - // BatchGetItem, do not access any indexes at all. In these cases, specifying - // INDEXES will only return ConsumedCapacity information for table(s). - // - // * TOTAL - The response includes only the aggregate ConsumedCapacity for - // the operation. - // - // * NONE - No ConsumedCapacity details are included in the response. - ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` - - // An optional parameter that returns the item attributes for an ExecuteStatement - // operation that failed a condition check. - // - // There is no additional cost associated with requesting a return value aside - // from the small network and processing overhead of receiving a larger response. - // No read capacity units are consumed. - ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"` - - // The PartiQL statement representing the operation to run. - // - // Statement is a required field - Statement *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ExecuteStatementInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ExecuteStatementInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ExecuteStatementInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ExecuteStatementInput"} - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - if s.Parameters != nil && len(s.Parameters) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Parameters", 1)) - } - if s.Statement == nil { - invalidParams.Add(request.NewErrParamRequired("Statement")) - } - if s.Statement != nil && len(*s.Statement) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Statement", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetConsistentRead sets the ConsistentRead field's value. -func (s *ExecuteStatementInput) SetConsistentRead(v bool) *ExecuteStatementInput { - s.ConsistentRead = &v - return s -} - -// SetLimit sets the Limit field's value. -func (s *ExecuteStatementInput) SetLimit(v int64) *ExecuteStatementInput { - s.Limit = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ExecuteStatementInput) SetNextToken(v string) *ExecuteStatementInput { - s.NextToken = &v - return s -} - -// SetParameters sets the Parameters field's value. -func (s *ExecuteStatementInput) SetParameters(v []*AttributeValue) *ExecuteStatementInput { - s.Parameters = v - return s -} - -// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. -func (s *ExecuteStatementInput) SetReturnConsumedCapacity(v string) *ExecuteStatementInput { - s.ReturnConsumedCapacity = &v - return s -} - -// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. -func (s *ExecuteStatementInput) SetReturnValuesOnConditionCheckFailure(v string) *ExecuteStatementInput { - s.ReturnValuesOnConditionCheckFailure = &v - return s -} - -// SetStatement sets the Statement field's value. -func (s *ExecuteStatementInput) SetStatement(v string) *ExecuteStatementInput { - s.Statement = &v - return s -} - -type ExecuteStatementOutput struct { - _ struct{} `type:"structure"` - - // The capacity units consumed by an operation. The data returned includes the - // total provisioned throughput consumed, along with statistics for the table - // and any indexes involved in the operation. ConsumedCapacity is only returned - // if the request asked for it. For more information, see Provisioned capacity - // mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html) - // in the Amazon DynamoDB Developer Guide. - ConsumedCapacity *ConsumedCapacity `type:"structure"` - - // If a read operation was used, this property will contain the result of the - // read operation; a map of attribute names and their values. For the write - // operations this value will be empty. - Items []map[string]*AttributeValue `type:"list"` - - // The primary key of the item where the operation stopped, inclusive of the - // previous result set. Use this value to start a new operation, excluding this - // value in the new request. If LastEvaluatedKey is empty, then the "last page" - // of results has been processed and there is no more data to be retrieved. - // If LastEvaluatedKey is not empty, it does not necessarily mean that there - // is more data in the result set. The only way to know when you have reached - // the end of the result set is when LastEvaluatedKey is empty. 
-	LastEvaluatedKey map[string]*AttributeValue `type:"map"`
-
-	// If the response of a read request exceeds the response payload limit DynamoDB
-	// will set this value in the response. If set, you can use this value in
-	// the subsequent request to get the remaining results.
-	NextToken *string `min:"1" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ExecuteStatementOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ExecuteStatementOutput) GoString() string {
-	return s.String()
-}
-
-// SetConsumedCapacity sets the ConsumedCapacity field's value.
-func (s *ExecuteStatementOutput) SetConsumedCapacity(v *ConsumedCapacity) *ExecuteStatementOutput {
-	s.ConsumedCapacity = v
-	return s
-}
-
-// SetItems sets the Items field's value.
-func (s *ExecuteStatementOutput) SetItems(v []map[string]*AttributeValue) *ExecuteStatementOutput {
-	s.Items = v
-	return s
-}
-
-// SetLastEvaluatedKey sets the LastEvaluatedKey field's value.
-func (s *ExecuteStatementOutput) SetLastEvaluatedKey(v map[string]*AttributeValue) *ExecuteStatementOutput {
-	s.LastEvaluatedKey = v
-	return s
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ExecuteStatementOutput) SetNextToken(v string) *ExecuteStatementOutput {
-	s.NextToken = &v
-	return s
-}
-
-type ExecuteTransactionInput struct {
-	_ struct{} `type:"structure"`
-
-	// Providing a ClientRequestToken makes the call to ExecuteTransaction idempotent,
-	// meaning that multiple identical calls have the same effect as one single call.
-	ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"`
-
-	// Determines the level of detail about either provisioned or on-demand throughput
-	// consumption that is returned in the response. For more information, see TransactGetItems
-	// (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TransactGetItems.html)
-	// and TransactWriteItems (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TransactWriteItems.html).
-	ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
-
-	// The list of PartiQL statements representing the transaction to run.
-	//
-	// TransactStatements is a required field
-	TransactStatements []*ParameterizedStatement `min:"1" type:"list" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ExecuteTransactionInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ExecuteTransactionInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
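The Items/LastEvaluatedKey/NextToken contract described above is easiest to see as a loop. A sketch of PartiQL pagination against a placeholder table, driving ExecuteStatement until NextToken comes back nil:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// Placeholder table and key; the `?` is bound from Parameters in order.
	in := &dynamodb.ExecuteStatementInput{
		Statement:  aws.String(`SELECT * FROM "demo-table" WHERE pk = ?`),
		Parameters: []*dynamodb.AttributeValue{{S: aws.String("user#1")}},
	}
	for {
		out, err := svc.ExecuteStatement(in)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("page of", len(out.Items), "items")
		// A nil NextToken is the end-of-results signal; a non-empty
		// LastEvaluatedKey alone does not guarantee more data.
		if out.NextToken == nil {
			break
		}
		in.NextToken = out.NextToken
	}
}
```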
-func (s *ExecuteTransactionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ExecuteTransactionInput"} - if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) - } - if s.TransactStatements == nil { - invalidParams.Add(request.NewErrParamRequired("TransactStatements")) - } - if s.TransactStatements != nil && len(s.TransactStatements) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TransactStatements", 1)) - } - if s.TransactStatements != nil { - for i, v := range s.TransactStatements { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TransactStatements", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetClientRequestToken sets the ClientRequestToken field's value. -func (s *ExecuteTransactionInput) SetClientRequestToken(v string) *ExecuteTransactionInput { - s.ClientRequestToken = &v - return s -} - -// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. -func (s *ExecuteTransactionInput) SetReturnConsumedCapacity(v string) *ExecuteTransactionInput { - s.ReturnConsumedCapacity = &v - return s -} - -// SetTransactStatements sets the TransactStatements field's value. -func (s *ExecuteTransactionInput) SetTransactStatements(v []*ParameterizedStatement) *ExecuteTransactionInput { - s.TransactStatements = v - return s -} - -type ExecuteTransactionOutput struct { - _ struct{} `type:"structure"` - - // The capacity units consumed by the entire operation. The values of the list - // are ordered according to the ordering of the statements. - ConsumedCapacity []*ConsumedCapacity `type:"list"` - - // The response to a PartiQL transaction. - Responses []*ItemResponse `min:"1" type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ExecuteTransactionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ExecuteTransactionOutput) GoString() string { - return s.String() -} - -// SetConsumedCapacity sets the ConsumedCapacity field's value. -func (s *ExecuteTransactionOutput) SetConsumedCapacity(v []*ConsumedCapacity) *ExecuteTransactionOutput { - s.ConsumedCapacity = v - return s -} - -// SetResponses sets the Responses field's value. -func (s *ExecuteTransactionOutput) SetResponses(v []*ItemResponse) *ExecuteTransactionOutput { - s.Responses = v - return s -} - -// Represents a condition to be compared with an attribute value. This condition -// can be used with DeleteItem, PutItem, or UpdateItem operations; if the comparison -// evaluates to true, the operation succeeds; if not, the operation fails. You -// can use ExpectedAttributeValue in one of two different ways: -// -// - Use AttributeValueList to specify one or more values to compare against -// an attribute. Use ComparisonOperator to specify how you want to perform -// the comparison. If the comparison evaluates to true, then the conditional -// operation succeeds. 
-// -// - Use Value to specify a value that DynamoDB will compare against an attribute. -// If the values match, then ExpectedAttributeValue evaluates to true and -// the conditional operation succeeds. Optionally, you can also set Exists -// to false, indicating that you do not expect to find the attribute value -// in the table. In this case, the conditional operation succeeds only if -// the comparison evaluates to false. -// -// Value and Exists are incompatible with AttributeValueList and ComparisonOperator. -// Note that if you use both sets of parameters at once, DynamoDB will return -// a ValidationException exception. -type ExpectedAttributeValue struct { - _ struct{} `type:"structure"` - - // One or more values to evaluate against the supplied attribute. The number - // of values in the list depends on the ComparisonOperator being used. - // - // For type Number, value comparisons are numeric. - // - // String value comparisons for greater than, equals, or less than are based - // on ASCII character code values. For example, a is greater than A, and a is - // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters - // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). - // - // For Binary, DynamoDB treats each byte of the binary data as unsigned when - // it compares binary values. - // - // For information on specifying data types in JSON, see JSON Data Format (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataFormat.html) - // in the Amazon DynamoDB Developer Guide. - AttributeValueList []*AttributeValue `type:"list"` - - // A comparator for evaluating attributes in the AttributeValueList. For example, - // equals, greater than, less than, etc. - // - // The following comparison operators are available: - // - // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | - // BEGINS_WITH | IN | BETWEEN - // - // The following are descriptions of each comparison operator. - // - // * EQ : Equal. EQ is supported for all data types, including lists and - // maps. AttributeValueList can contain only one AttributeValue element of - // type String, Number, Binary, String Set, Number Set, or Binary Set. If - // an item contains an AttributeValue element of a different type than the - // one provided in the request, the value does not match. For example, {"S":"6"} - // does not equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", - // "1"]}. - // - // * NE : Not equal. NE is supported for all data types, including lists - // and maps. AttributeValueList can contain only one AttributeValue of type - // String, Number, Binary, String Set, Number Set, or Binary Set. If an item - // contains an AttributeValue of a different type than the one provided in - // the request, the value does not match. For example, {"S":"6"} does not - // equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. - // - // * LE : Less than or equal. AttributeValueList can contain only one AttributeValue - // element of type String, Number, or Binary (not a set type). If an item - // contains an AttributeValue element of a different type than the one provided - // in the request, the value does not match. For example, {"S":"6"} does - // not equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", - // "1"]}. - // - // * LT : Less than. AttributeValueList can contain only one AttributeValue - // of type String, Number, or Binary (not a set type). 
If an item contains - // an AttributeValue element of a different type than the one provided in - // the request, the value does not match. For example, {"S":"6"} does not - // equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", - // "1"]}. - // - // * GE : Greater than or equal. AttributeValueList can contain only one - // AttributeValue element of type String, Number, or Binary (not a set type). - // If an item contains an AttributeValue element of a different type than - // the one provided in the request, the value does not match. For example, - // {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} does not compare to - // {"NS":["6", "2", "1"]}. - // - // * GT : Greater than. AttributeValueList can contain only one AttributeValue - // element of type String, Number, or Binary (not a set type). If an item - // contains an AttributeValue element of a different type than the one provided - // in the request, the value does not match. For example, {"S":"6"} does - // not equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", - // "1"]}. - // - // * NOT_NULL : The attribute exists. NOT_NULL is supported for all data - // types, including lists and maps. This operator tests for the existence - // of an attribute, not its data type. If the data type of attribute "a" - // is null, and you evaluate it using NOT_NULL, the result is a Boolean true. - // This result is because the attribute "a" exists; its data type is not - // relevant to the NOT_NULL comparison operator. - // - // * NULL : The attribute does not exist. NULL is supported for all data - // types, including lists and maps. This operator tests for the nonexistence - // of an attribute, not its data type. If the data type of attribute "a" - // is null, and you evaluate it using NULL, the result is a Boolean false. - // This is because the attribute "a" exists; its data type is not relevant - // to the NULL comparison operator. - // - // * CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList - // can contain only one AttributeValue element of type String, Number, or - // Binary (not a set type). If the target attribute of the comparison is - // of type String, then the operator checks for a substring match. If the - // target attribute of the comparison is of type Binary, then the operator - // looks for a subsequence of the target that matches the input. If the target - // attribute of the comparison is a set ("SS", "NS", or "BS"), then the operator - // evaluates to true if it finds an exact match with any member of the set. - // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can - // be a list; however, "b" cannot be a set, a map, or a list. - // - // * NOT_CONTAINS : Checks for absence of a subsequence, or absence of a - // value in a set. AttributeValueList can contain only one AttributeValue - // element of type String, Number, or Binary (not a set type). If the target - // attribute of the comparison is a String, then the operator checks for - // the absence of a substring match. If the target attribute of the comparison - // is Binary, then the operator checks for the absence of a subsequence of - // the target that matches the input. If the target attribute of the comparison - // is a set ("SS", "NS", or "BS"), then the operator evaluates to true if - // it does not find an exact match with any member of the set. 
NOT_CONTAINS - // is supported for lists: When evaluating "a NOT CONTAINS b", "a" can be - // a list; however, "b" cannot be a set, a map, or a list. - // - // * BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only - // one AttributeValue of type String or Binary (not a Number or a set type). - // The target attribute of the comparison must be of type String or Binary - // (not a Number or a set type). - // - // * IN : Checks for matching elements in a list. AttributeValueList can - // contain one or more AttributeValue elements of type String, Number, or - // Binary. These attributes are compared against an existing attribute of - // an item. If any elements of the input are equal to the item attribute, - // the expression evaluates to true. - // - // * BETWEEN : Greater than or equal to the first value, and less than or - // equal to the second value. AttributeValueList must contain two AttributeValue - // elements of the same type, either String, Number, or Binary (not a set - // type). A target attribute matches if the target value is greater than, - // or equal to, the first element and less than, or equal to, the second - // element. If an item contains an AttributeValue element of a different - // type than the one provided in the request, the value does not match. For - // example, {"S":"6"} does not compare to {"N":"6"}. Also, {"N":"6"} does - // not compare to {"NS":["6", "2", "1"]} - ComparisonOperator *string `type:"string" enum:"ComparisonOperator"` - - // Causes DynamoDB to evaluate the value before attempting a conditional operation: - // - // * If Exists is true, DynamoDB will check to see if that attribute value - // already exists in the table. If it is found, then the operation succeeds. - // If it is not found, the operation fails with a ConditionCheckFailedException. - // - // * If Exists is false, DynamoDB assumes that the attribute value does not - // exist in the table. If in fact the value does not exist, then the assumption - // is valid and the operation succeeds. If the value is found, despite the - // assumption that it does not exist, the operation fails with a ConditionCheckFailedException. - // - // The default setting for Exists is true. If you supply a Value all by itself, - // DynamoDB assumes the attribute exists: You don't have to set Exists to true, - // because it is implied. - // - // DynamoDB returns a ValidationException if: - // - // * Exists is true but there is no Value to check. (You expect a value to - // exist, but don't specify what that value is.) - // - // * Exists is false but you also provide a Value. (You cannot expect an - // attribute to have a value, while also expecting it not to exist.) - Exists *bool `type:"boolean"` - - // Represents the data for the expected attribute. - // - // Each attribute value is described as a name-value pair. The name is the data - // type, and the value is the data itself. - // - // For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes) - // in the Amazon DynamoDB Developer Guide. - Value *AttributeValue `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
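ExpectedAttributeValue is the legacy conditional mechanism, so a concrete use helps: a put that only succeeds when the key does not exist yet. Table and key names are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	_, err := svc.PutItem(&dynamodb.PutItemInput{
		TableName: aws.String("demo-table"), // placeholder
		Item: map[string]*dynamodb.AttributeValue{
			"pk": {S: aws.String("user#1")},
		},
		// Legacy conditional: Exists=false with no Value asserts that no
		// item with this key is present; a clash surfaces as
		// ConditionalCheckFailedException.
		Expected: map[string]*dynamodb.ExpectedAttributeValue{
			"pk": {Exists: aws.Bool(false)},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("item written")
}
```

New code would normally express the same guard with a ConditionExpression such as `attribute_not_exists(pk)`; the Expected map shown here is what these legacy shapes exist to support.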
-func (s ExpectedAttributeValue) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ExpectedAttributeValue) GoString() string { - return s.String() -} - -// SetAttributeValueList sets the AttributeValueList field's value. -func (s *ExpectedAttributeValue) SetAttributeValueList(v []*AttributeValue) *ExpectedAttributeValue { - s.AttributeValueList = v - return s -} - -// SetComparisonOperator sets the ComparisonOperator field's value. -func (s *ExpectedAttributeValue) SetComparisonOperator(v string) *ExpectedAttributeValue { - s.ComparisonOperator = &v - return s -} - -// SetExists sets the Exists field's value. -func (s *ExpectedAttributeValue) SetExists(v bool) *ExpectedAttributeValue { - s.Exists = &v - return s -} - -// SetValue sets the Value field's value. -func (s *ExpectedAttributeValue) SetValue(v *AttributeValue) *ExpectedAttributeValue { - s.Value = v - return s -} - -// There was a conflict when writing to the specified S3 bucket. -type ExportConflictException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ExportConflictException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ExportConflictException) GoString() string { - return s.String() -} - -func newErrorExportConflictException(v protocol.ResponseMetadata) error { - return &ExportConflictException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *ExportConflictException) Code() string { - return "ExportConflictException" -} - -// Message returns the exception's message. -func (s *ExportConflictException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *ExportConflictException) OrigErr() error { - return nil -} - -func (s *ExportConflictException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *ExportConflictException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *ExportConflictException) RequestID() string { - return s.RespMetadata.RequestID -} - -// Represents the properties of the exported table. -type ExportDescription struct { - _ struct{} `type:"structure"` - - // The billable size of the table export. - BilledSizeBytes *int64 `type:"long"` - - // The client token that was provided for the export task. A client token makes - // calls to ExportTableToPointInTimeInput idempotent, meaning that multiple - // identical calls have the same effect as one single call. 
- ClientToken *string `type:"string"` - - // The time at which the export task completed. - EndTime *time.Time `type:"timestamp"` - - // The Amazon Resource Name (ARN) of the table export. - ExportArn *string `min:"37" type:"string"` - - // The format of the exported data. Valid values for ExportFormat are DYNAMODB_JSON - // or ION. - ExportFormat *string `type:"string" enum:"ExportFormat"` - - // The name of the manifest file for the export task. - ExportManifest *string `type:"string"` - - // Export can be in one of the following states: IN_PROGRESS, COMPLETED, or - // FAILED. - ExportStatus *string `type:"string" enum:"ExportStatus"` - - // Point in time from which table data was exported. - ExportTime *time.Time `type:"timestamp"` - - // The type of export that was performed. Valid values are FULL_EXPORT or INCREMENTAL_EXPORT. - ExportType *string `type:"string" enum:"ExportType"` - - // Status code for the result of the failed export. - FailureCode *string `type:"string"` - - // Export failure reason description. - FailureMessage *string `type:"string"` - - // Optional object containing the parameters specific to an incremental export. - IncrementalExportSpecification *IncrementalExportSpecification `type:"structure"` - - // The number of items exported. - ItemCount *int64 `type:"long"` - - // The name of the Amazon S3 bucket containing the export. - S3Bucket *string `type:"string"` - - // The ID of the Amazon Web Services account that owns the bucket containing - // the export. - S3BucketOwner *string `type:"string"` - - // The Amazon S3 bucket prefix used as the file name and path of the exported - // snapshot. - S3Prefix *string `type:"string"` - - // Type of encryption used on the bucket where export data is stored. Valid - // values for S3SseAlgorithm are: - // - // * AES256 - server-side encryption with Amazon S3 managed keys - // - // * KMS - server-side encryption with KMS managed keys - S3SseAlgorithm *string `type:"string" enum:"S3SseAlgorithm"` - - // The ID of the KMS managed key used to encrypt the S3 bucket where export - // data is stored (if applicable). - S3SseKmsKeyId *string `min:"1" type:"string"` - - // The time at which the export task began. - StartTime *time.Time `type:"timestamp"` - - // The Amazon Resource Name (ARN) of the table that was exported. - TableArn *string `min:"1" type:"string"` - - // Unique ID of the table that was exported. - TableId *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ExportDescription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ExportDescription) GoString() string { - return s.String() -} - -// SetBilledSizeBytes sets the BilledSizeBytes field's value. -func (s *ExportDescription) SetBilledSizeBytes(v int64) *ExportDescription { - s.BilledSizeBytes = &v - return s -} - -// SetClientToken sets the ClientToken field's value. -func (s *ExportDescription) SetClientToken(v string) *ExportDescription { - s.ClientToken = &v - return s -} - -// SetEndTime sets the EndTime field's value. 
-func (s *ExportDescription) SetEndTime(v time.Time) *ExportDescription { - s.EndTime = &v - return s -} - -// SetExportArn sets the ExportArn field's value. -func (s *ExportDescription) SetExportArn(v string) *ExportDescription { - s.ExportArn = &v - return s -} - -// SetExportFormat sets the ExportFormat field's value. -func (s *ExportDescription) SetExportFormat(v string) *ExportDescription { - s.ExportFormat = &v - return s -} - -// SetExportManifest sets the ExportManifest field's value. -func (s *ExportDescription) SetExportManifest(v string) *ExportDescription { - s.ExportManifest = &v - return s -} - -// SetExportStatus sets the ExportStatus field's value. -func (s *ExportDescription) SetExportStatus(v string) *ExportDescription { - s.ExportStatus = &v - return s -} - -// SetExportTime sets the ExportTime field's value. -func (s *ExportDescription) SetExportTime(v time.Time) *ExportDescription { - s.ExportTime = &v - return s -} - -// SetExportType sets the ExportType field's value. -func (s *ExportDescription) SetExportType(v string) *ExportDescription { - s.ExportType = &v - return s -} - -// SetFailureCode sets the FailureCode field's value. -func (s *ExportDescription) SetFailureCode(v string) *ExportDescription { - s.FailureCode = &v - return s -} - -// SetFailureMessage sets the FailureMessage field's value. -func (s *ExportDescription) SetFailureMessage(v string) *ExportDescription { - s.FailureMessage = &v - return s -} - -// SetIncrementalExportSpecification sets the IncrementalExportSpecification field's value. -func (s *ExportDescription) SetIncrementalExportSpecification(v *IncrementalExportSpecification) *ExportDescription { - s.IncrementalExportSpecification = v - return s -} - -// SetItemCount sets the ItemCount field's value. -func (s *ExportDescription) SetItemCount(v int64) *ExportDescription { - s.ItemCount = &v - return s -} - -// SetS3Bucket sets the S3Bucket field's value. -func (s *ExportDescription) SetS3Bucket(v string) *ExportDescription { - s.S3Bucket = &v - return s -} - -// SetS3BucketOwner sets the S3BucketOwner field's value. -func (s *ExportDescription) SetS3BucketOwner(v string) *ExportDescription { - s.S3BucketOwner = &v - return s -} - -// SetS3Prefix sets the S3Prefix field's value. -func (s *ExportDescription) SetS3Prefix(v string) *ExportDescription { - s.S3Prefix = &v - return s -} - -// SetS3SseAlgorithm sets the S3SseAlgorithm field's value. -func (s *ExportDescription) SetS3SseAlgorithm(v string) *ExportDescription { - s.S3SseAlgorithm = &v - return s -} - -// SetS3SseKmsKeyId sets the S3SseKmsKeyId field's value. -func (s *ExportDescription) SetS3SseKmsKeyId(v string) *ExportDescription { - s.S3SseKmsKeyId = &v - return s -} - -// SetStartTime sets the StartTime field's value. -func (s *ExportDescription) SetStartTime(v time.Time) *ExportDescription { - s.StartTime = &v - return s -} - -// SetTableArn sets the TableArn field's value. -func (s *ExportDescription) SetTableArn(v string) *ExportDescription { - s.TableArn = &v - return s -} - -// SetTableId sets the TableId field's value. -func (s *ExportDescription) SetTableId(v string) *ExportDescription { - s.TableId = &v - return s -} - -// The specified export was not found. -type ExportNotFoundException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ExportNotFoundException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ExportNotFoundException) GoString() string { - return s.String() -} - -func newErrorExportNotFoundException(v protocol.ResponseMetadata) error { - return &ExportNotFoundException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *ExportNotFoundException) Code() string { - return "ExportNotFoundException" -} - -// Message returns the exception's message. -func (s *ExportNotFoundException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *ExportNotFoundException) OrigErr() error { - return nil -} - -func (s *ExportNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *ExportNotFoundException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *ExportNotFoundException) RequestID() string { - return s.RespMetadata.RequestID -} - -// Summary information about an export task. -type ExportSummary struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the export. - ExportArn *string `min:"37" type:"string"` - - // Export can be in one of the following states: IN_PROGRESS, COMPLETED, or - // FAILED. - ExportStatus *string `type:"string" enum:"ExportStatus"` - - // The type of export that was performed. Valid values are FULL_EXPORT or INCREMENTAL_EXPORT. - ExportType *string `type:"string" enum:"ExportType"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ExportSummary) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ExportSummary) GoString() string { - return s.String() -} - -// SetExportArn sets the ExportArn field's value. -func (s *ExportSummary) SetExportArn(v string) *ExportSummary { - s.ExportArn = &v - return s -} - -// SetExportStatus sets the ExportStatus field's value. -func (s *ExportSummary) SetExportStatus(v string) *ExportSummary { - s.ExportStatus = &v - return s -} - -// SetExportType sets the ExportType field's value. -func (s *ExportSummary) SetExportType(v string) *ExportSummary { - s.ExportType = &v - return s -} - -type ExportTableToPointInTimeInput struct { - _ struct{} `type:"structure"` - - // Providing a ClientToken makes the call to ExportTableToPointInTimeInput idempotent, - // meaning that multiple identical calls have the same effect as one single - // call. 
 - //
 - // A client token is valid for 8 hours after the first request that uses it
 - // is completed. After 8 hours, any request with the same client token is treated
 - // as a new request. Do not resubmit the same request with the same client token
 - // for more than 8 hours, or the result might not be idempotent.
 - //
 - // If you submit a request with the same client token but a change in other
 - // parameters within the 8-hour idempotency window, DynamoDB returns an ExportConflictException.
 - ClientToken *string `type:"string" idempotencyToken:"true"`
 -
 - // The format for the exported data. Valid values for ExportFormat are DYNAMODB_JSON
 - // or ION.
 - ExportFormat *string `type:"string" enum:"ExportFormat"`
 -
 - // Time in the past from which to export table data, counted in seconds from
 - // the start of the Unix epoch. The table export will be a snapshot of the table's
 - // state at this point in time.
 - ExportTime *time.Time `type:"timestamp"`
 -
 - // Choice of whether to execute as a full export or incremental export. Valid
 - // values are FULL_EXPORT or INCREMENTAL_EXPORT. The default value is FULL_EXPORT.
 - // If INCREMENTAL_EXPORT is provided, the IncrementalExportSpecification must
 - // also be used.
 - ExportType *string `type:"string" enum:"ExportType"`
 -
 - // Optional object containing the parameters specific to an incremental export.
 - IncrementalExportSpecification *IncrementalExportSpecification `type:"structure"`
 -
 - // The name of the Amazon S3 bucket to export the snapshot to.
 - //
 - // S3Bucket is a required field
 - S3Bucket *string `type:"string" required:"true"`
 -
 - // The ID of the Amazon Web Services account that owns the bucket the export
 - // will be stored in.
 - //
 - // S3BucketOwner is a required parameter when exporting to an S3 bucket in another
 - // account.
 - S3BucketOwner *string `type:"string"`
 -
 - // The Amazon S3 bucket prefix to use as the file name and path of the exported
 - // snapshot.
 - S3Prefix *string `type:"string"`
 -
 - // Type of encryption used on the bucket where export data will be stored. Valid
 - // values for S3SseAlgorithm are:
 - //
 - // * AES256 - server-side encryption with Amazon S3 managed keys
 - //
 - // * KMS - server-side encryption with KMS managed keys
 - S3SseAlgorithm *string `type:"string" enum:"S3SseAlgorithm"`
 -
 - // The ID of the KMS managed key used to encrypt the S3 bucket where export
 - // data will be stored (if applicable).
 - S3SseKmsKeyId *string `min:"1" type:"string"`
 -
 - // The Amazon Resource Name (ARN) associated with the table to export.
 - //
 - // TableArn is a required field
 - TableArn *string `min:"1" type:"string" required:"true"`
 -}
 -
 -// String returns the string representation.
 -//
 -// API parameter values that are decorated as "sensitive" in the API will not
 -// be included in the string output. The member name will be present, but the
 -// value will be replaced with "sensitive".
 -func (s ExportTableToPointInTimeInput) String() string {
 - return awsutil.Prettify(s)
 -}
 -
 -// GoString returns the string representation.
 -//
 -// API parameter values that are decorated as "sensitive" in the API will not
 -// be included in the string output. The member name will be present, but the
 -// value will be replaced with "sensitive".
 -func (s ExportTableToPointInTimeInput) GoString() string {
 - return s.String()
 -}
 -
 -// Validate inspects the fields of the type to determine if they are valid.
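[Editor's sketch, not part of the diff: kicking off a point-in-time export with the input type above. The table ARN and bucket are hypothetical; the client setup from the first sketch is assumed. ClientToken can be omitted because the SDK auto-fills idempotency tokens.]

out, err := svc.ExportTableToPointInTime(&dynamodb.ExportTableToPointInTimeInput{
	TableArn:     aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/Music"), // hypothetical
	S3Bucket:     aws.String("my-export-bucket"),                                    // hypothetical
	ExportFormat: aws.String(dynamodb.ExportFormatDynamodbJson),
})
if err == nil {
	// ExportStatus starts as IN_PROGRESS; poll DescribeExport with the ExportArn
	// until it reports COMPLETED or FAILED.
	fmt.Println(aws.StringValue(out.ExportDescription.ExportArn),
		aws.StringValue(out.ExportDescription.ExportStatus))
}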
-func (s *ExportTableToPointInTimeInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ExportTableToPointInTimeInput"} - if s.S3Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("S3Bucket")) - } - if s.S3SseKmsKeyId != nil && len(*s.S3SseKmsKeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("S3SseKmsKeyId", 1)) - } - if s.TableArn == nil { - invalidParams.Add(request.NewErrParamRequired("TableArn")) - } - if s.TableArn != nil && len(*s.TableArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableArn", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetClientToken sets the ClientToken field's value. -func (s *ExportTableToPointInTimeInput) SetClientToken(v string) *ExportTableToPointInTimeInput { - s.ClientToken = &v - return s -} - -// SetExportFormat sets the ExportFormat field's value. -func (s *ExportTableToPointInTimeInput) SetExportFormat(v string) *ExportTableToPointInTimeInput { - s.ExportFormat = &v - return s -} - -// SetExportTime sets the ExportTime field's value. -func (s *ExportTableToPointInTimeInput) SetExportTime(v time.Time) *ExportTableToPointInTimeInput { - s.ExportTime = &v - return s -} - -// SetExportType sets the ExportType field's value. -func (s *ExportTableToPointInTimeInput) SetExportType(v string) *ExportTableToPointInTimeInput { - s.ExportType = &v - return s -} - -// SetIncrementalExportSpecification sets the IncrementalExportSpecification field's value. -func (s *ExportTableToPointInTimeInput) SetIncrementalExportSpecification(v *IncrementalExportSpecification) *ExportTableToPointInTimeInput { - s.IncrementalExportSpecification = v - return s -} - -// SetS3Bucket sets the S3Bucket field's value. -func (s *ExportTableToPointInTimeInput) SetS3Bucket(v string) *ExportTableToPointInTimeInput { - s.S3Bucket = &v - return s -} - -// SetS3BucketOwner sets the S3BucketOwner field's value. -func (s *ExportTableToPointInTimeInput) SetS3BucketOwner(v string) *ExportTableToPointInTimeInput { - s.S3BucketOwner = &v - return s -} - -// SetS3Prefix sets the S3Prefix field's value. -func (s *ExportTableToPointInTimeInput) SetS3Prefix(v string) *ExportTableToPointInTimeInput { - s.S3Prefix = &v - return s -} - -// SetS3SseAlgorithm sets the S3SseAlgorithm field's value. -func (s *ExportTableToPointInTimeInput) SetS3SseAlgorithm(v string) *ExportTableToPointInTimeInput { - s.S3SseAlgorithm = &v - return s -} - -// SetS3SseKmsKeyId sets the S3SseKmsKeyId field's value. -func (s *ExportTableToPointInTimeInput) SetS3SseKmsKeyId(v string) *ExportTableToPointInTimeInput { - s.S3SseKmsKeyId = &v - return s -} - -// SetTableArn sets the TableArn field's value. -func (s *ExportTableToPointInTimeInput) SetTableArn(v string) *ExportTableToPointInTimeInput { - s.TableArn = &v - return s -} - -type ExportTableToPointInTimeOutput struct { - _ struct{} `type:"structure"` - - // Contains a description of the table export. - ExportDescription *ExportDescription `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ExportTableToPointInTimeOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the
 -// value will be replaced with "sensitive".
 -func (s ExportTableToPointInTimeOutput) GoString() string {
 - return s.String()
 -}
 -
 -// SetExportDescription sets the ExportDescription field's value.
 -func (s *ExportTableToPointInTimeOutput) SetExportDescription(v *ExportDescription) *ExportTableToPointInTimeOutput {
 - s.ExportDescription = v
 - return s
 -}
 -
 -// Represents a failure in a contributor insights operation.
 -type FailureException struct {
 - _ struct{} `type:"structure"`
 -
 - // Description of the failure.
 - ExceptionDescription *string `type:"string"`
 -
 - // Exception name.
 - ExceptionName *string `type:"string"`
 -}
 -
 -// String returns the string representation.
 -//
 -// API parameter values that are decorated as "sensitive" in the API will not
 -// be included in the string output. The member name will be present, but the
 -// value will be replaced with "sensitive".
 -func (s FailureException) String() string {
 - return awsutil.Prettify(s)
 -}
 -
 -// GoString returns the string representation.
 -//
 -// API parameter values that are decorated as "sensitive" in the API will not
 -// be included in the string output. The member name will be present, but the
 -// value will be replaced with "sensitive".
 -func (s FailureException) GoString() string {
 - return s.String()
 -}
 -
 -// SetExceptionDescription sets the ExceptionDescription field's value.
 -func (s *FailureException) SetExceptionDescription(v string) *FailureException {
 - s.ExceptionDescription = &v
 - return s
 -}
 -
 -// SetExceptionName sets the ExceptionName field's value.
 -func (s *FailureException) SetExceptionName(v string) *FailureException {
 - s.ExceptionName = &v
 - return s
 -}
 -
 -// Specifies an item and related attribute values to retrieve in a TransactGetItem
 -// object.
 -type Get struct {
 - _ struct{} `type:"structure"`
 -
 - // One or more substitution tokens for attribute names in the ProjectionExpression
 - // parameter.
 - ExpressionAttributeNames map[string]*string `type:"map"`
 -
 - // A map of attribute names to AttributeValue objects that specifies the primary
 - // key of the item to retrieve.
 - //
 - // Key is a required field
 - Key map[string]*AttributeValue `type:"map" required:"true"`
 -
 - // A string that identifies one or more attributes of the specified item to
 - // retrieve from the table. The attributes in the expression must be separated
 - // by commas. If no attribute names are specified, then all attributes of the
 - // specified item are returned. If any of the requested attributes are not found,
 - // they do not appear in the result.
 - ProjectionExpression *string `type:"string"`
 -
 - // The name of the table from which to retrieve the specified item. You can
 - // also provide the Amazon Resource Name (ARN) of the table in this parameter.
 - //
 - // TableName is a required field
 - TableName *string `min:"1" type:"string" required:"true"`
 -}
 -
 -// String returns the string representation.
 -//
 -// API parameter values that are decorated as "sensitive" in the API will not
 -// be included in the string output. The member name will be present, but the
 -// value will be replaced with "sensitive".
 -func (s Get) String() string {
 - return awsutil.Prettify(s)
 -}
 -
 -// GoString returns the string representation.
 -//
 -// API parameter values that are decorated as "sensitive" in the API will not
 -// be included in the string output. The member name will be present, but the
 -// value will be replaced with "sensitive".
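[Editor's sketch, not part of the diff: the Get shape above is used inside TransactGetItems. Table, key, and attribute names are hypothetical; the client setup from the first sketch is assumed.]

out, err := svc.TransactGetItems(&dynamodb.TransactGetItemsInput{
	TransactItems: []*dynamodb.TransactGetItem{{
		Get: &dynamodb.Get{
			TableName: aws.String("Music"), // hypothetical
			Key: map[string]*dynamodb.AttributeValue{
				"Artist": {S: aws.String("No One You Know")},
			},
			ProjectionExpression: aws.String("AlbumTitle"), // hypothetical attribute
		},
	}},
})
if err == nil && len(out.Responses) > 0 {
	fmt.Println(out.Responses[0].Item) // each ItemResponse.Item holds the projected attributes
}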
-func (s Get) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Get) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Get"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. -func (s *Get) SetExpressionAttributeNames(v map[string]*string) *Get { - s.ExpressionAttributeNames = v - return s -} - -// SetKey sets the Key field's value. -func (s *Get) SetKey(v map[string]*AttributeValue) *Get { - s.Key = v - return s -} - -// SetProjectionExpression sets the ProjectionExpression field's value. -func (s *Get) SetProjectionExpression(v string) *Get { - s.ProjectionExpression = &v - return s -} - -// SetTableName sets the TableName field's value. -func (s *Get) SetTableName(v string) *Get { - s.TableName = &v - return s -} - -// Represents the input of a GetItem operation. -type GetItemInput struct { - _ struct{} `type:"structure"` - - // This is a legacy parameter. Use ProjectionExpression instead. For more information, - // see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html) - // in the Amazon DynamoDB Developer Guide. - AttributesToGet []*string `min:"1" type:"list"` - - // Determines the read consistency model: If set to true, then the operation - // uses strongly consistent reads; otherwise, the operation uses eventually - // consistent reads. - ConsistentRead *bool `type:"boolean"` - - // One or more substitution tokens for attribute names in an expression. The - // following are some use cases for using ExpressionAttributeNames: - // - // * To access an attribute whose name conflicts with a DynamoDB reserved - // word. - // - // * To create a placeholder for repeating occurrences of an attribute name - // in an expression. - // - // * To prevent special characters in an attribute name from being misinterpreted - // in an expression. - // - // Use the # character in an expression to dereference an attribute name. For - // example, consider the following attribute name: - // - // * Percentile - // - // The name of this attribute conflicts with a reserved word, so it cannot be - // used directly in an expression. (For the complete list of reserved words, - // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) - // in the Amazon DynamoDB Developer Guide). To work around this, you could specify - // the following for ExpressionAttributeNames: - // - // * {"#P":"Percentile"} - // - // You could then use this substitution in an expression, as in this example: - // - // * #P = :val - // - // Tokens that begin with the : character are expression attribute values, which - // are placeholders for the actual value at runtime. - // - // For more information on expression attribute names, see Specifying Item Attributes - // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. 
- ExpressionAttributeNames map[string]*string `type:"map"` - - // A map of attribute names to AttributeValue objects, representing the primary - // key of the item to retrieve. - // - // For the primary key, you must provide all of the attributes. For example, - // with a simple primary key, you only need to provide a value for the partition - // key. For a composite primary key, you must provide values for both the partition - // key and the sort key. - // - // Key is a required field - Key map[string]*AttributeValue `type:"map" required:"true"` - - // A string that identifies one or more attributes to retrieve from the table. - // These attributes can include scalars, sets, or elements of a JSON document. - // The attributes in the expression must be separated by commas. - // - // If no attribute names are specified, then all attributes are returned. If - // any of the requested attributes are not found, they do not appear in the - // result. - // - // For more information, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. - ProjectionExpression *string `type:"string"` - - // Determines the level of detail about either provisioned or on-demand throughput - // consumption that is returned in the response: - // - // * INDEXES - The response includes the aggregate ConsumedCapacity for the - // operation, together with ConsumedCapacity for each table and secondary - // index that was accessed. Note that some operations, such as GetItem and - // BatchGetItem, do not access any indexes at all. In these cases, specifying - // INDEXES will only return ConsumedCapacity information for table(s). - // - // * TOTAL - The response includes only the aggregate ConsumedCapacity for - // the operation. - // - // * NONE - No ConsumedCapacity details are included in the response. - ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` - - // The name of the table containing the requested item. You can also provide - // the Amazon Resource Name (ARN) of the table in this parameter. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetItemInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetItemInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
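[Editor's sketch, not part of the diff: a GetItem call exercising the Percentile / #P substitution described above, plus strongly consistent reads and ConsumedCapacity reporting. Table and key names are hypothetical; the client setup from the first sketch is assumed.]

out, err := svc.GetItem(&dynamodb.GetItemInput{
	TableName:      aws.String("Music"), // hypothetical
	ConsistentRead: aws.Bool(true),      // strongly consistent read
	Key: map[string]*dynamodb.AttributeValue{
		"Artist": {S: aws.String("No One You Know")},
	},
	// Percentile is a reserved word, so dereference it through #P.
	ProjectionExpression:     aws.String("#P"),
	ExpressionAttributeNames: map[string]*string{"#P": aws.String("Percentile")},
	ReturnConsumedCapacity:   aws.String(dynamodb.ReturnConsumedCapacityTotal),
})
if err == nil {
	fmt.Println(out.Item, out.ConsumedCapacity)
}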
-func (s *GetItemInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetItemInput"} - if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AttributesToGet", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAttributesToGet sets the AttributesToGet field's value. -func (s *GetItemInput) SetAttributesToGet(v []*string) *GetItemInput { - s.AttributesToGet = v - return s -} - -// SetConsistentRead sets the ConsistentRead field's value. -func (s *GetItemInput) SetConsistentRead(v bool) *GetItemInput { - s.ConsistentRead = &v - return s -} - -// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. -func (s *GetItemInput) SetExpressionAttributeNames(v map[string]*string) *GetItemInput { - s.ExpressionAttributeNames = v - return s -} - -// SetKey sets the Key field's value. -func (s *GetItemInput) SetKey(v map[string]*AttributeValue) *GetItemInput { - s.Key = v - return s -} - -// SetProjectionExpression sets the ProjectionExpression field's value. -func (s *GetItemInput) SetProjectionExpression(v string) *GetItemInput { - s.ProjectionExpression = &v - return s -} - -// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. -func (s *GetItemInput) SetReturnConsumedCapacity(v string) *GetItemInput { - s.ReturnConsumedCapacity = &v - return s -} - -// SetTableName sets the TableName field's value. -func (s *GetItemInput) SetTableName(v string) *GetItemInput { - s.TableName = &v - return s -} - -// Represents the output of a GetItem operation. -type GetItemOutput struct { - _ struct{} `type:"structure"` - - // The capacity units consumed by the GetItem operation. The data returned includes - // the total provisioned throughput consumed, along with statistics for the - // table and any indexes involved in the operation. ConsumedCapacity is only - // returned if the ReturnConsumedCapacity parameter was specified. For more - // information, see Capacity unit consumption for read operations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#read-operation-consumption) - // in the Amazon DynamoDB Developer Guide. - ConsumedCapacity *ConsumedCapacity `type:"structure"` - - // A map of attribute names to AttributeValue objects, as specified by ProjectionExpression. - Item map[string]*AttributeValue `type:"map"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetItemOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetItemOutput) GoString() string { - return s.String() -} - -// SetConsumedCapacity sets the ConsumedCapacity field's value. 
-func (s *GetItemOutput) SetConsumedCapacity(v *ConsumedCapacity) *GetItemOutput { - s.ConsumedCapacity = v - return s -} - -// SetItem sets the Item field's value. -func (s *GetItemOutput) SetItem(v map[string]*AttributeValue) *GetItemOutput { - s.Item = v - return s -} - -type GetResourcePolicyInput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the DynamoDB resource to which the policy - // is attached. The resources you can specify include tables and streams. - // - // ResourceArn is a required field - ResourceArn *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetResourcePolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetResourcePolicyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetResourcePolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetResourcePolicyInput"} - if s.ResourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceArn")) - } - if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetResourceArn sets the ResourceArn field's value. -func (s *GetResourcePolicyInput) SetResourceArn(v string) *GetResourcePolicyInput { - s.ResourceArn = &v - return s -} - -type GetResourcePolicyOutput struct { - _ struct{} `type:"structure"` - - // The resource-based policy document attached to the resource, which can be - // a table or stream, in JSON format. - Policy *string `type:"string"` - - // A unique string that represents the revision ID of the policy. If you're - // comparing revision IDs, make sure to always use string comparison logic. - RevisionId *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetResourcePolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetResourcePolicyOutput) GoString() string { - return s.String() -} - -// SetPolicy sets the Policy field's value. -func (s *GetResourcePolicyOutput) SetPolicy(v string) *GetResourcePolicyOutput { - s.Policy = &v - return s -} - -// SetRevisionId sets the RevisionId field's value. -func (s *GetResourcePolicyOutput) SetRevisionId(v string) *GetResourcePolicyOutput { - s.RevisionId = &v - return s -} - -// Represents the properties of a global secondary index. 
-type GlobalSecondaryIndex struct { - _ struct{} `type:"structure"` - - // The name of the global secondary index. The name must be unique among all - // other indexes on this table. - // - // IndexName is a required field - IndexName *string `min:"3" type:"string" required:"true"` - - // The complete key schema for a global secondary index, which consists of one - // or more pairs of attribute names and key types: - // - // * HASH - partition key - // - // * RANGE - sort key - // - // The partition key of an item is also known as its hash attribute. The term - // "hash attribute" derives from DynamoDB's usage of an internal hash function - // to evenly distribute data items across partitions, based on their partition - // key values. - // - // The sort key of an item is also known as its range attribute. The term "range - // attribute" derives from the way DynamoDB stores items with the same partition - // key physically close together, in sorted order by the sort key value. - // - // KeySchema is a required field - KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` - - // The maximum number of read and write units for the specified global secondary - // index. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, - // or both. - OnDemandThroughput *OnDemandThroughput `type:"structure"` - - // Represents attributes that are copied (projected) from the table into the - // global secondary index. These are in addition to the primary key attributes - // and index key attributes, which are automatically projected. - // - // Projection is a required field - Projection *Projection `type:"structure" required:"true"` - - // Represents the provisioned throughput settings for the specified global secondary - // index. - // - // For current minimum and maximum provisioned throughput values, see Service, - // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) - // in the Amazon DynamoDB Developer Guide. - ProvisionedThroughput *ProvisionedThroughput `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GlobalSecondaryIndex) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GlobalSecondaryIndex) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
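[Editor's sketch, not part of the diff: a provisioned-mode GlobalSecondaryIndex definition as it would be passed via CreateTableInput.GlobalSecondaryIndexes. Index and attribute names are hypothetical; every key attribute must also appear in the table's AttributeDefinitions.]

gsi := &dynamodb.GlobalSecondaryIndex{
	IndexName: aws.String("GenreIndex"), // hypothetical
	KeySchema: []*dynamodb.KeySchemaElement{
		{AttributeName: aws.String("Genre"), KeyType: aws.String(dynamodb.KeyTypeHash)},
		{AttributeName: aws.String("Price"), KeyType: aws.String(dynamodb.KeyTypeRange)},
	},
	Projection: &dynamodb.Projection{
		ProjectionType: aws.String(dynamodb.ProjectionTypeKeysOnly),
	},
	ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
		ReadCapacityUnits:  aws.Int64(5),
		WriteCapacityUnits: aws.Int64(5),
	},
}
_ = gsi // appended to CreateTableInput.GlobalSecondaryIndexes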
-func (s *GlobalSecondaryIndex) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GlobalSecondaryIndex"} - if s.IndexName == nil { - invalidParams.Add(request.NewErrParamRequired("IndexName")) - } - if s.IndexName != nil && len(*s.IndexName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) - } - if s.KeySchema == nil { - invalidParams.Add(request.NewErrParamRequired("KeySchema")) - } - if s.KeySchema != nil && len(s.KeySchema) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1)) - } - if s.Projection == nil { - invalidParams.Add(request.NewErrParamRequired("Projection")) - } - if s.KeySchema != nil { - for i, v := range s.KeySchema { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams)) - } - } - } - if s.Projection != nil { - if err := s.Projection.Validate(); err != nil { - invalidParams.AddNested("Projection", err.(request.ErrInvalidParams)) - } - } - if s.ProvisionedThroughput != nil { - if err := s.ProvisionedThroughput.Validate(); err != nil { - invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetIndexName sets the IndexName field's value. -func (s *GlobalSecondaryIndex) SetIndexName(v string) *GlobalSecondaryIndex { - s.IndexName = &v - return s -} - -// SetKeySchema sets the KeySchema field's value. -func (s *GlobalSecondaryIndex) SetKeySchema(v []*KeySchemaElement) *GlobalSecondaryIndex { - s.KeySchema = v - return s -} - -// SetOnDemandThroughput sets the OnDemandThroughput field's value. -func (s *GlobalSecondaryIndex) SetOnDemandThroughput(v *OnDemandThroughput) *GlobalSecondaryIndex { - s.OnDemandThroughput = v - return s -} - -// SetProjection sets the Projection field's value. -func (s *GlobalSecondaryIndex) SetProjection(v *Projection) *GlobalSecondaryIndex { - s.Projection = v - return s -} - -// SetProvisionedThroughput sets the ProvisionedThroughput field's value. -func (s *GlobalSecondaryIndex) SetProvisionedThroughput(v *ProvisionedThroughput) *GlobalSecondaryIndex { - s.ProvisionedThroughput = v - return s -} - -// Represents the auto scaling settings of a global secondary index for a global -// table that will be modified. -type GlobalSecondaryIndexAutoScalingUpdate struct { - _ struct{} `type:"structure"` - - // The name of the global secondary index. - IndexName *string `min:"3" type:"string"` - - // Represents the auto scaling settings to be modified for a global table or - // global secondary index. - ProvisionedWriteCapacityAutoScalingUpdate *AutoScalingSettingsUpdate `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GlobalSecondaryIndexAutoScalingUpdate) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GlobalSecondaryIndexAutoScalingUpdate) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
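[Editor's sketch, not part of the diff: GlobalSecondaryIndexAutoScalingUpdate is consumed by UpdateTableReplicaAutoScaling. Names and capacity numbers are hypothetical, and a real call typically also carries a ScalingPolicyUpdate; the client setup from the first sketch is assumed.]

_, err := svc.UpdateTableReplicaAutoScaling(&dynamodb.UpdateTableReplicaAutoScalingInput{
	TableName: aws.String("Music"), // hypothetical
	GlobalSecondaryIndexUpdates: []*dynamodb.GlobalSecondaryIndexAutoScalingUpdate{{
		IndexName: aws.String("GenreIndex"), // hypothetical
		ProvisionedWriteCapacityAutoScalingUpdate: &dynamodb.AutoScalingSettingsUpdate{
			MinimumUnits: aws.Int64(5),
			MaximumUnits: aws.Int64(100),
		},
	}},
})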
-func (s *GlobalSecondaryIndexAutoScalingUpdate) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GlobalSecondaryIndexAutoScalingUpdate"} - if s.IndexName != nil && len(*s.IndexName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) - } - if s.ProvisionedWriteCapacityAutoScalingUpdate != nil { - if err := s.ProvisionedWriteCapacityAutoScalingUpdate.Validate(); err != nil { - invalidParams.AddNested("ProvisionedWriteCapacityAutoScalingUpdate", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetIndexName sets the IndexName field's value. -func (s *GlobalSecondaryIndexAutoScalingUpdate) SetIndexName(v string) *GlobalSecondaryIndexAutoScalingUpdate { - s.IndexName = &v - return s -} - -// SetProvisionedWriteCapacityAutoScalingUpdate sets the ProvisionedWriteCapacityAutoScalingUpdate field's value. -func (s *GlobalSecondaryIndexAutoScalingUpdate) SetProvisionedWriteCapacityAutoScalingUpdate(v *AutoScalingSettingsUpdate) *GlobalSecondaryIndexAutoScalingUpdate { - s.ProvisionedWriteCapacityAutoScalingUpdate = v - return s -} - -// Represents the properties of a global secondary index. -type GlobalSecondaryIndexDescription struct { - _ struct{} `type:"structure"` - - // Indicates whether the index is currently backfilling. Backfilling is the - // process of reading items from the table and determining whether they can - // be added to the index. (Not all items will qualify: For example, a partition - // key cannot have any duplicate values.) If an item can be added to the index, - // DynamoDB will do so. After all items have been processed, the backfilling - // operation is complete and Backfilling is false. - // - // You can delete an index that is being created during the Backfilling phase - // when IndexStatus is set to CREATING and Backfilling is true. You can't delete - // the index that is being created when IndexStatus is set to CREATING and Backfilling - // is false. - // - // For indexes that were created during a CreateTable operation, the Backfilling - // attribute does not appear in the DescribeTable output. - Backfilling *bool `type:"boolean"` - - // The Amazon Resource Name (ARN) that uniquely identifies the index. - IndexArn *string `type:"string"` - - // The name of the global secondary index. - IndexName *string `min:"3" type:"string"` - - // The total size of the specified index, in bytes. DynamoDB updates this value - // approximately every six hours. Recent changes might not be reflected in this - // value. - IndexSizeBytes *int64 `type:"long"` - - // The current state of the global secondary index: - // - // * CREATING - The index is being created. - // - // * UPDATING - The index is being updated. - // - // * DELETING - The index is being deleted. - // - // * ACTIVE - The index is ready for use. - IndexStatus *string `type:"string" enum:"IndexStatus"` - - // The number of items in the specified index. DynamoDB updates this value approximately - // every six hours. Recent changes might not be reflected in this value. - ItemCount *int64 `type:"long"` - - // The complete key schema for a global secondary index, which consists of one - // or more pairs of attribute names and key types: - // - // * HASH - partition key - // - // * RANGE - sort key - // - // The partition key of an item is also known as its hash attribute. 
The term - // "hash attribute" derives from DynamoDB's usage of an internal hash function - // to evenly distribute data items across partitions, based on their partition - // key values. - // - // The sort key of an item is also known as its range attribute. The term "range - // attribute" derives from the way DynamoDB stores items with the same partition - // key physically close together, in sorted order by the sort key value. - KeySchema []*KeySchemaElement `min:"1" type:"list"` - - // The maximum number of read and write units for the specified global secondary - // index. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, - // or both. - OnDemandThroughput *OnDemandThroughput `type:"structure"` - - // Represents attributes that are copied (projected) from the table into the - // global secondary index. These are in addition to the primary key attributes - // and index key attributes, which are automatically projected. - Projection *Projection `type:"structure"` - - // Represents the provisioned throughput settings for the specified global secondary - // index. - // - // For current minimum and maximum provisioned throughput values, see Service, - // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) - // in the Amazon DynamoDB Developer Guide. - ProvisionedThroughput *ProvisionedThroughputDescription `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GlobalSecondaryIndexDescription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GlobalSecondaryIndexDescription) GoString() string { - return s.String() -} - -// SetBackfilling sets the Backfilling field's value. -func (s *GlobalSecondaryIndexDescription) SetBackfilling(v bool) *GlobalSecondaryIndexDescription { - s.Backfilling = &v - return s -} - -// SetIndexArn sets the IndexArn field's value. -func (s *GlobalSecondaryIndexDescription) SetIndexArn(v string) *GlobalSecondaryIndexDescription { - s.IndexArn = &v - return s -} - -// SetIndexName sets the IndexName field's value. -func (s *GlobalSecondaryIndexDescription) SetIndexName(v string) *GlobalSecondaryIndexDescription { - s.IndexName = &v - return s -} - -// SetIndexSizeBytes sets the IndexSizeBytes field's value. -func (s *GlobalSecondaryIndexDescription) SetIndexSizeBytes(v int64) *GlobalSecondaryIndexDescription { - s.IndexSizeBytes = &v - return s -} - -// SetIndexStatus sets the IndexStatus field's value. -func (s *GlobalSecondaryIndexDescription) SetIndexStatus(v string) *GlobalSecondaryIndexDescription { - s.IndexStatus = &v - return s -} - -// SetItemCount sets the ItemCount field's value. -func (s *GlobalSecondaryIndexDescription) SetItemCount(v int64) *GlobalSecondaryIndexDescription { - s.ItemCount = &v - return s -} - -// SetKeySchema sets the KeySchema field's value. -func (s *GlobalSecondaryIndexDescription) SetKeySchema(v []*KeySchemaElement) *GlobalSecondaryIndexDescription { - s.KeySchema = v - return s -} - -// SetOnDemandThroughput sets the OnDemandThroughput field's value. 
-func (s *GlobalSecondaryIndexDescription) SetOnDemandThroughput(v *OnDemandThroughput) *GlobalSecondaryIndexDescription { - s.OnDemandThroughput = v - return s -} - -// SetProjection sets the Projection field's value. -func (s *GlobalSecondaryIndexDescription) SetProjection(v *Projection) *GlobalSecondaryIndexDescription { - s.Projection = v - return s -} - -// SetProvisionedThroughput sets the ProvisionedThroughput field's value. -func (s *GlobalSecondaryIndexDescription) SetProvisionedThroughput(v *ProvisionedThroughputDescription) *GlobalSecondaryIndexDescription { - s.ProvisionedThroughput = v - return s -} - -// Represents the properties of a global secondary index for the table when -// the backup was created. -type GlobalSecondaryIndexInfo struct { - _ struct{} `type:"structure"` - - // The name of the global secondary index. - IndexName *string `min:"3" type:"string"` - - // The complete key schema for a global secondary index, which consists of one - // or more pairs of attribute names and key types: - // - // * HASH - partition key - // - // * RANGE - sort key - // - // The partition key of an item is also known as its hash attribute. The term - // "hash attribute" derives from DynamoDB's usage of an internal hash function - // to evenly distribute data items across partitions, based on their partition - // key values. - // - // The sort key of an item is also known as its range attribute. The term "range - // attribute" derives from the way DynamoDB stores items with the same partition - // key physically close together, in sorted order by the sort key value. - KeySchema []*KeySchemaElement `min:"1" type:"list"` - - // Sets the maximum number of read and write units for the specified on-demand - // table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, - // or both. - OnDemandThroughput *OnDemandThroughput `type:"structure"` - - // Represents attributes that are copied (projected) from the table into the - // global secondary index. These are in addition to the primary key attributes - // and index key attributes, which are automatically projected. - Projection *Projection `type:"structure"` - - // Represents the provisioned throughput settings for the specified global secondary - // index. - ProvisionedThroughput *ProvisionedThroughput `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GlobalSecondaryIndexInfo) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GlobalSecondaryIndexInfo) GoString() string { - return s.String() -} - -// SetIndexName sets the IndexName field's value. -func (s *GlobalSecondaryIndexInfo) SetIndexName(v string) *GlobalSecondaryIndexInfo { - s.IndexName = &v - return s -} - -// SetKeySchema sets the KeySchema field's value. -func (s *GlobalSecondaryIndexInfo) SetKeySchema(v []*KeySchemaElement) *GlobalSecondaryIndexInfo { - s.KeySchema = v - return s -} - -// SetOnDemandThroughput sets the OnDemandThroughput field's value. 
-func (s *GlobalSecondaryIndexInfo) SetOnDemandThroughput(v *OnDemandThroughput) *GlobalSecondaryIndexInfo { - s.OnDemandThroughput = v - return s -} - -// SetProjection sets the Projection field's value. -func (s *GlobalSecondaryIndexInfo) SetProjection(v *Projection) *GlobalSecondaryIndexInfo { - s.Projection = v - return s -} - -// SetProvisionedThroughput sets the ProvisionedThroughput field's value. -func (s *GlobalSecondaryIndexInfo) SetProvisionedThroughput(v *ProvisionedThroughput) *GlobalSecondaryIndexInfo { - s.ProvisionedThroughput = v - return s -} - -// Represents one of the following: -// -// - A new global secondary index to be added to an existing table. -// -// - New provisioned throughput parameters for an existing global secondary -// index. -// -// - An existing global secondary index to be removed from an existing table. -type GlobalSecondaryIndexUpdate struct { - _ struct{} `type:"structure"` - - // The parameters required for creating a global secondary index on an existing - // table: - // - // * IndexName - // - // * KeySchema - // - // * AttributeDefinitions - // - // * Projection - // - // * ProvisionedThroughput - Create *CreateGlobalSecondaryIndexAction `type:"structure"` - - // The name of an existing global secondary index to be removed. - Delete *DeleteGlobalSecondaryIndexAction `type:"structure"` - - // The name of an existing global secondary index, along with new provisioned - // throughput settings to be applied to that index. - Update *UpdateGlobalSecondaryIndexAction `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GlobalSecondaryIndexUpdate) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GlobalSecondaryIndexUpdate) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GlobalSecondaryIndexUpdate) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GlobalSecondaryIndexUpdate"} - if s.Create != nil { - if err := s.Create.Validate(); err != nil { - invalidParams.AddNested("Create", err.(request.ErrInvalidParams)) - } - } - if s.Delete != nil { - if err := s.Delete.Validate(); err != nil { - invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) - } - } - if s.Update != nil { - if err := s.Update.Validate(); err != nil { - invalidParams.AddNested("Update", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCreate sets the Create field's value. -func (s *GlobalSecondaryIndexUpdate) SetCreate(v *CreateGlobalSecondaryIndexAction) *GlobalSecondaryIndexUpdate { - s.Create = v - return s -} - -// SetDelete sets the Delete field's value. -func (s *GlobalSecondaryIndexUpdate) SetDelete(v *DeleteGlobalSecondaryIndexAction) *GlobalSecondaryIndexUpdate { - s.Delete = v - return s -} - -// SetUpdate sets the Update field's value. 
-func (s *GlobalSecondaryIndexUpdate) SetUpdate(v *UpdateGlobalSecondaryIndexAction) *GlobalSecondaryIndexUpdate { - s.Update = v - return s -} - -// Represents the properties of a global table. -type GlobalTable struct { - _ struct{} `type:"structure"` - - // The global table name. - GlobalTableName *string `min:"3" type:"string"` - - // The Regions where the global table has replicas. - ReplicationGroup []*Replica `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GlobalTable) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GlobalTable) GoString() string { - return s.String() -} - -// SetGlobalTableName sets the GlobalTableName field's value. -func (s *GlobalTable) SetGlobalTableName(v string) *GlobalTable { - s.GlobalTableName = &v - return s -} - -// SetReplicationGroup sets the ReplicationGroup field's value. -func (s *GlobalTable) SetReplicationGroup(v []*Replica) *GlobalTable { - s.ReplicationGroup = v - return s -} - -// The specified global table already exists. -type GlobalTableAlreadyExistsException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GlobalTableAlreadyExistsException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GlobalTableAlreadyExistsException) GoString() string { - return s.String() -} - -func newErrorGlobalTableAlreadyExistsException(v protocol.ResponseMetadata) error { - return &GlobalTableAlreadyExistsException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *GlobalTableAlreadyExistsException) Code() string { - return "GlobalTableAlreadyExistsException" -} - -// Message returns the exception's message. -func (s *GlobalTableAlreadyExistsException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *GlobalTableAlreadyExistsException) OrigErr() error { - return nil -} - -func (s *GlobalTableAlreadyExistsException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *GlobalTableAlreadyExistsException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *GlobalTableAlreadyExistsException) RequestID() string { - return s.RespMetadata.RequestID -} - -// Contains details about the global table. 
-type GlobalTableDescription struct { - _ struct{} `type:"structure"` - - // The creation time of the global table. - CreationDateTime *time.Time `type:"timestamp"` - - // The unique identifier of the global table. - GlobalTableArn *string `type:"string"` - - // The global table name. - GlobalTableName *string `min:"3" type:"string"` - - // The current state of the global table: - // - // * CREATING - The global table is being created. - // - // * UPDATING - The global table is being updated. - // - // * DELETING - The global table is being deleted. - // - // * ACTIVE - The global table is ready for use. - GlobalTableStatus *string `type:"string" enum:"GlobalTableStatus"` - - // The Regions where the global table has replicas. - ReplicationGroup []*ReplicaDescription `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GlobalTableDescription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GlobalTableDescription) GoString() string { - return s.String() -} - -// SetCreationDateTime sets the CreationDateTime field's value. -func (s *GlobalTableDescription) SetCreationDateTime(v time.Time) *GlobalTableDescription { - s.CreationDateTime = &v - return s -} - -// SetGlobalTableArn sets the GlobalTableArn field's value. -func (s *GlobalTableDescription) SetGlobalTableArn(v string) *GlobalTableDescription { - s.GlobalTableArn = &v - return s -} - -// SetGlobalTableName sets the GlobalTableName field's value. -func (s *GlobalTableDescription) SetGlobalTableName(v string) *GlobalTableDescription { - s.GlobalTableName = &v - return s -} - -// SetGlobalTableStatus sets the GlobalTableStatus field's value. -func (s *GlobalTableDescription) SetGlobalTableStatus(v string) *GlobalTableDescription { - s.GlobalTableStatus = &v - return s -} - -// SetReplicationGroup sets the ReplicationGroup field's value. -func (s *GlobalTableDescription) SetReplicationGroup(v []*ReplicaDescription) *GlobalTableDescription { - s.ReplicationGroup = v - return s -} - -// Represents the settings of a global secondary index for a global table that -// will be modified. -type GlobalTableGlobalSecondaryIndexSettingsUpdate struct { - _ struct{} `type:"structure"` - - // The name of the global secondary index. The name must be unique among all - // other indexes on this table. - // - // IndexName is a required field - IndexName *string `min:"3" type:"string" required:"true"` - - // Auto scaling settings for managing a global secondary index's write capacity - // units. - ProvisionedWriteCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"` - - // The maximum number of writes consumed per second before DynamoDB returns - // a ThrottlingException. - ProvisionedWriteCapacityUnits *int64 `min:"1" type:"long"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s GlobalTableGlobalSecondaryIndexSettingsUpdate) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GlobalTableGlobalSecondaryIndexSettingsUpdate) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GlobalTableGlobalSecondaryIndexSettingsUpdate) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GlobalTableGlobalSecondaryIndexSettingsUpdate"} - if s.IndexName == nil { - invalidParams.Add(request.NewErrParamRequired("IndexName")) - } - if s.IndexName != nil && len(*s.IndexName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) - } - if s.ProvisionedWriteCapacityUnits != nil && *s.ProvisionedWriteCapacityUnits < 1 { - invalidParams.Add(request.NewErrParamMinValue("ProvisionedWriteCapacityUnits", 1)) - } - if s.ProvisionedWriteCapacityAutoScalingSettingsUpdate != nil { - if err := s.ProvisionedWriteCapacityAutoScalingSettingsUpdate.Validate(); err != nil { - invalidParams.AddNested("ProvisionedWriteCapacityAutoScalingSettingsUpdate", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetIndexName sets the IndexName field's value. -func (s *GlobalTableGlobalSecondaryIndexSettingsUpdate) SetIndexName(v string) *GlobalTableGlobalSecondaryIndexSettingsUpdate { - s.IndexName = &v - return s -} - -// SetProvisionedWriteCapacityAutoScalingSettingsUpdate sets the ProvisionedWriteCapacityAutoScalingSettingsUpdate field's value. -func (s *GlobalTableGlobalSecondaryIndexSettingsUpdate) SetProvisionedWriteCapacityAutoScalingSettingsUpdate(v *AutoScalingSettingsUpdate) *GlobalTableGlobalSecondaryIndexSettingsUpdate { - s.ProvisionedWriteCapacityAutoScalingSettingsUpdate = v - return s -} - -// SetProvisionedWriteCapacityUnits sets the ProvisionedWriteCapacityUnits field's value. -func (s *GlobalTableGlobalSecondaryIndexSettingsUpdate) SetProvisionedWriteCapacityUnits(v int64) *GlobalTableGlobalSecondaryIndexSettingsUpdate { - s.ProvisionedWriteCapacityUnits = &v - return s -} - -// The specified global table does not exist. -type GlobalTableNotFoundException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GlobalTableNotFoundException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GlobalTableNotFoundException) GoString() string { - return s.String() -} - -func newErrorGlobalTableNotFoundException(v protocol.ResponseMetadata) error { - return &GlobalTableNotFoundException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. 
-func (s *GlobalTableNotFoundException) Code() string { - return "GlobalTableNotFoundException" -} - -// Message returns the exception's message. -func (s *GlobalTableNotFoundException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *GlobalTableNotFoundException) OrigErr() error { - return nil -} - -func (s *GlobalTableNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *GlobalTableNotFoundException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *GlobalTableNotFoundException) RequestID() string { - return s.RespMetadata.RequestID -} - -// DynamoDB rejected the request because you retried a request with a different -// payload but with an idempotent token that was already used. -type IdempotentParameterMismatchException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"Message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s IdempotentParameterMismatchException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s IdempotentParameterMismatchException) GoString() string { - return s.String() -} - -func newErrorIdempotentParameterMismatchException(v protocol.ResponseMetadata) error { - return &IdempotentParameterMismatchException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *IdempotentParameterMismatchException) Code() string { - return "IdempotentParameterMismatchException" -} - -// Message returns the exception's message. -func (s *IdempotentParameterMismatchException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *IdempotentParameterMismatchException) OrigErr() error { - return nil -} - -func (s *IdempotentParameterMismatchException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *IdempotentParameterMismatchException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *IdempotentParameterMismatchException) RequestID() string { - return s.RespMetadata.RequestID -} - -// There was a conflict when importing from the specified S3 source. This can -// occur when the current import conflicts with a previous import request that -// had the same client token. -type ImportConflictException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ImportConflictException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ImportConflictException) GoString() string { - return s.String() -} - -func newErrorImportConflictException(v protocol.ResponseMetadata) error { - return &ImportConflictException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *ImportConflictException) Code() string { - return "ImportConflictException" -} - -// Message returns the exception's message. -func (s *ImportConflictException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *ImportConflictException) OrigErr() error { - return nil -} - -func (s *ImportConflictException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *ImportConflictException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *ImportConflictException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The specified import was not found. -type ImportNotFoundException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ImportNotFoundException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ImportNotFoundException) GoString() string { - return s.String() -} - -func newErrorImportNotFoundException(v protocol.ResponseMetadata) error { - return &ImportNotFoundException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *ImportNotFoundException) Code() string { - return "ImportNotFoundException" -} - -// Message returns the exception's message. -func (s *ImportNotFoundException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *ImportNotFoundException) OrigErr() error { - return nil -} - -func (s *ImportNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *ImportNotFoundException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. 
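// Editor's sketch (not part of the generated file): every modeled exception above
// satisfies awserr.Error, so callers usually branch on Code(); the concrete type
// additionally exposes the StatusCode/RequestID accessors shown in this diff.
// Assumes err came from a global-table call and the usual imports plus
// github.com/aws/aws-sdk-go/aws/awserr and log.

func classifyGlobalTableErr(err error) {
	aerr, ok := err.(awserr.Error)
	if !ok {
		return
	}
	switch aerr.Code() {
	case dynamodb.ErrCodeGlobalTableNotFoundException:
		// Not created yet; a CreateGlobalTable call is safe here.
		if nf, ok := err.(*dynamodb.GlobalTableNotFoundException); ok {
			log.Printf("status=%d request-id=%s: %s", nf.StatusCode(), nf.RequestID(), nf.Message())
		}
	case dynamodb.ErrCodeGlobalTableAlreadyExistsException:
		// Another process created it first; usually ignorable.
	}
}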
-func (s *ImportNotFoundException) RequestID() string { - return s.RespMetadata.RequestID -} - -// Summary information about the source file for the import. -type ImportSummary struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the CloudWatch Log Group associated with - // this import task. - CloudWatchLogGroupArn *string `min:"1" type:"string"` - - // The time at which this import task ended. - EndTime *time.Time `type:"timestamp"` - - // The Amazon Resource Name (ARN) corresponding to the import request. - ImportArn *string `min:"37" type:"string"` - - // The status of the import operation. - ImportStatus *string `type:"string" enum:"ImportStatus"` - - // The format of the source data. Valid values are CSV, DYNAMODB_JSON, or ION. - InputFormat *string `type:"string" enum:"InputFormat"` - - // The path and S3 bucket of the source file that is being imported. This includes - // the S3Bucket (required), S3KeyPrefix (optional), and S3BucketOwner (optional - // if the bucket is owned by the requester). - S3BucketSource *S3BucketSource `type:"structure"` - - // The time at which this import task began. - StartTime *time.Time `type:"timestamp"` - - // The Amazon Resource Name (ARN) of the table being imported into. - TableArn *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ImportSummary) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ImportSummary) GoString() string { - return s.String() -} - -// SetCloudWatchLogGroupArn sets the CloudWatchLogGroupArn field's value. -func (s *ImportSummary) SetCloudWatchLogGroupArn(v string) *ImportSummary { - s.CloudWatchLogGroupArn = &v - return s -} - -// SetEndTime sets the EndTime field's value. -func (s *ImportSummary) SetEndTime(v time.Time) *ImportSummary { - s.EndTime = &v - return s -} - -// SetImportArn sets the ImportArn field's value. -func (s *ImportSummary) SetImportArn(v string) *ImportSummary { - s.ImportArn = &v - return s -} - -// SetImportStatus sets the ImportStatus field's value. -func (s *ImportSummary) SetImportStatus(v string) *ImportSummary { - s.ImportStatus = &v - return s -} - -// SetInputFormat sets the InputFormat field's value. -func (s *ImportSummary) SetInputFormat(v string) *ImportSummary { - s.InputFormat = &v - return s -} - -// SetS3BucketSource sets the S3BucketSource field's value. -func (s *ImportSummary) SetS3BucketSource(v *S3BucketSource) *ImportSummary { - s.S3BucketSource = v - return s -} - -// SetStartTime sets the StartTime field's value. -func (s *ImportSummary) SetStartTime(v time.Time) *ImportSummary { - s.StartTime = &v - return s -} - -// SetTableArn sets the TableArn field's value. -func (s *ImportSummary) SetTableArn(v string) *ImportSummary { - s.TableArn = &v - return s -} - -// Represents the properties of the table being imported into. -type ImportTableDescription struct { - _ struct{} `type:"structure"` - - // The client token that was provided for the import task.
Reusing the client - // token on retry makes a call to ImportTable idempotent. - ClientToken *string `type:"string"` - - // The Amazon Resource Name (ARN) of the CloudWatch Log Group associated with - // the target table. - CloudWatchLogGroupArn *string `min:"1" type:"string"` - - // The time at which the creation of the table associated with this import task - // completed. - EndTime *time.Time `type:"timestamp"` - - // The number of errors that occurred while importing the source file into the - // target table. - ErrorCount *int64 `type:"long"` - - // The error code corresponding to the failure that the import job ran into - // during execution. - FailureCode *string `type:"string"` - - // The error message corresponding to the failure that the import job ran into - // during execution. - FailureMessage *string `type:"string"` - - // The Amazon Resource Name (ARN) corresponding to the import request. - ImportArn *string `min:"37" type:"string"` - - // The status of the import. - ImportStatus *string `type:"string" enum:"ImportStatus"` - - // The number of items successfully imported into the new table. - ImportedItemCount *int64 `type:"long"` - - // The compression options for the data that has been imported into the target - // table. The values are NONE, GZIP, or ZSTD. - InputCompressionType *string `type:"string" enum:"InputCompressionType"` - - // The format of the source data going into the target table. - InputFormat *string `type:"string" enum:"InputFormat"` - - // The format options for the data that was imported into the target table. - // There is one value, CsvOption. - InputFormatOptions *InputFormatOptions `type:"structure"` - - // The total number of items processed from the source file. - ProcessedItemCount *int64 `type:"long"` - - // The total size of data processed from the source file, in bytes. - ProcessedSizeBytes *int64 `type:"long"` - - // Values for the S3 bucket the source file is imported from. Includes bucket - // name (required), key prefix (optional), and bucket account owner ID (optional). - S3BucketSource *S3BucketSource `type:"structure"` - - // The time when this import task started. - StartTime *time.Time `type:"timestamp"` - - // The Amazon Resource Name (ARN) of the table being imported into. - TableArn *string `min:"1" type:"string"` - - // The parameters for the new table that is being imported into. - TableCreationParameters *TableCreationParameters `type:"structure"` - - // The table ID corresponding to the table created by the import table process. - TableId *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ImportTableDescription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ImportTableDescription) GoString() string { - return s.String() -} - -// SetClientToken sets the ClientToken field's value. -func (s *ImportTableDescription) SetClientToken(v string) *ImportTableDescription { - s.ClientToken = &v - return s -} - -// SetCloudWatchLogGroupArn sets the CloudWatchLogGroupArn field's value.
-func (s *ImportTableDescription) SetCloudWatchLogGroupArn(v string) *ImportTableDescription { - s.CloudWatchLogGroupArn = &v - return s -} - -// SetEndTime sets the EndTime field's value. -func (s *ImportTableDescription) SetEndTime(v time.Time) *ImportTableDescription { - s.EndTime = &v - return s -} - -// SetErrorCount sets the ErrorCount field's value. -func (s *ImportTableDescription) SetErrorCount(v int64) *ImportTableDescription { - s.ErrorCount = &v - return s -} - -// SetFailureCode sets the FailureCode field's value. -func (s *ImportTableDescription) SetFailureCode(v string) *ImportTableDescription { - s.FailureCode = &v - return s -} - -// SetFailureMessage sets the FailureMessage field's value. -func (s *ImportTableDescription) SetFailureMessage(v string) *ImportTableDescription { - s.FailureMessage = &v - return s -} - -// SetImportArn sets the ImportArn field's value. -func (s *ImportTableDescription) SetImportArn(v string) *ImportTableDescription { - s.ImportArn = &v - return s -} - -// SetImportStatus sets the ImportStatus field's value. -func (s *ImportTableDescription) SetImportStatus(v string) *ImportTableDescription { - s.ImportStatus = &v - return s -} - -// SetImportedItemCount sets the ImportedItemCount field's value. -func (s *ImportTableDescription) SetImportedItemCount(v int64) *ImportTableDescription { - s.ImportedItemCount = &v - return s -} - -// SetInputCompressionType sets the InputCompressionType field's value. -func (s *ImportTableDescription) SetInputCompressionType(v string) *ImportTableDescription { - s.InputCompressionType = &v - return s -} - -// SetInputFormat sets the InputFormat field's value. -func (s *ImportTableDescription) SetInputFormat(v string) *ImportTableDescription { - s.InputFormat = &v - return s -} - -// SetInputFormatOptions sets the InputFormatOptions field's value. -func (s *ImportTableDescription) SetInputFormatOptions(v *InputFormatOptions) *ImportTableDescription { - s.InputFormatOptions = v - return s -} - -// SetProcessedItemCount sets the ProcessedItemCount field's value. -func (s *ImportTableDescription) SetProcessedItemCount(v int64) *ImportTableDescription { - s.ProcessedItemCount = &v - return s -} - -// SetProcessedSizeBytes sets the ProcessedSizeBytes field's value. -func (s *ImportTableDescription) SetProcessedSizeBytes(v int64) *ImportTableDescription { - s.ProcessedSizeBytes = &v - return s -} - -// SetS3BucketSource sets the S3BucketSource field's value. -func (s *ImportTableDescription) SetS3BucketSource(v *S3BucketSource) *ImportTableDescription { - s.S3BucketSource = v - return s -} - -// SetStartTime sets the StartTime field's value. -func (s *ImportTableDescription) SetStartTime(v time.Time) *ImportTableDescription { - s.StartTime = &v - return s -} - -// SetTableArn sets the TableArn field's value. -func (s *ImportTableDescription) SetTableArn(v string) *ImportTableDescription { - s.TableArn = &v - return s -} - -// SetTableCreationParameters sets the TableCreationParameters field's value. -func (s *ImportTableDescription) SetTableCreationParameters(v *TableCreationParameters) *ImportTableDescription { - s.TableCreationParameters = v - return s -} - -// SetTableId sets the TableId field's value. 
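// Editor's sketch (not part of the generated file): polling DescribeImport and
// reading the progress counters on ImportTableDescription described above. The
// ARN value and sleep interval are arbitrary; assumes the usual imports plus
// log and time.

func waitForImport(svc *dynamodb.DynamoDB, importArn string) error {
	for {
		out, err := svc.DescribeImport(&dynamodb.DescribeImportInput{
			ImportArn: aws.String(importArn),
		})
		if err != nil {
			return err
		}
		d := out.ImportTableDescription
		log.Printf("status=%s processed=%d imported=%d errors=%d",
			aws.StringValue(d.ImportStatus),
			aws.Int64Value(d.ProcessedItemCount),
			aws.Int64Value(d.ImportedItemCount),
			aws.Int64Value(d.ErrorCount))
		if aws.StringValue(d.ImportStatus) != dynamodb.ImportStatusInProgress {
			return nil
		}
		time.Sleep(30 * time.Second)
	}
}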
-func (s *ImportTableDescription) SetTableId(v string) *ImportTableDescription { - s.TableId = &v - return s -} - -type ImportTableInput struct { - _ struct{} `type:"structure"` - - // Providing a ClientToken makes the call to ImportTable idempotent, meaning - // that multiple identical calls have the same effect as one single call. - // - // A client token is valid for 8 hours after the first request that uses it - // is completed. After 8 hours, any request with the same client token is treated - // as a new request. Do not resubmit the same request with the same client token - // for more than 8 hours, or the result might not be idempotent. - // - // If you submit a request with the same client token but a change in other - // parameters within the 8-hour idempotency window, DynamoDB returns an IdempotentParameterMismatch - // exception. - ClientToken *string `type:"string" idempotencyToken:"true"` - - // Type of compression to be used on the input coming from the imported table. - InputCompressionType *string `type:"string" enum:"InputCompressionType"` - - // The format of the source data. Valid values for InputFormat are CSV, DYNAMODB_JSON, - // or ION. - // - // InputFormat is a required field - InputFormat *string `type:"string" required:"true" enum:"InputFormat"` - - // Additional properties that specify how the input is formatted. - InputFormatOptions *InputFormatOptions `type:"structure"` - - // The S3 bucket that provides the source for the import. - // - // S3BucketSource is a required field - S3BucketSource *S3BucketSource `type:"structure" required:"true"` - - // Parameters for the table to import the data into. - // - // TableCreationParameters is a required field - TableCreationParameters *TableCreationParameters `type:"structure" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ImportTableInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ImportTableInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid.
-func (s *ImportTableInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ImportTableInput"} - if s.InputFormat == nil { - invalidParams.Add(request.NewErrParamRequired("InputFormat")) - } - if s.S3BucketSource == nil { - invalidParams.Add(request.NewErrParamRequired("S3BucketSource")) - } - if s.TableCreationParameters == nil { - invalidParams.Add(request.NewErrParamRequired("TableCreationParameters")) - } - if s.InputFormatOptions != nil { - if err := s.InputFormatOptions.Validate(); err != nil { - invalidParams.AddNested("InputFormatOptions", err.(request.ErrInvalidParams)) - } - } - if s.S3BucketSource != nil { - if err := s.S3BucketSource.Validate(); err != nil { - invalidParams.AddNested("S3BucketSource", err.(request.ErrInvalidParams)) - } - } - if s.TableCreationParameters != nil { - if err := s.TableCreationParameters.Validate(); err != nil { - invalidParams.AddNested("TableCreationParameters", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetClientToken sets the ClientToken field's value. -func (s *ImportTableInput) SetClientToken(v string) *ImportTableInput { - s.ClientToken = &v - return s -} - -// SetInputCompressionType sets the InputCompressionType field's value. -func (s *ImportTableInput) SetInputCompressionType(v string) *ImportTableInput { - s.InputCompressionType = &v - return s -} - -// SetInputFormat sets the InputFormat field's value. -func (s *ImportTableInput) SetInputFormat(v string) *ImportTableInput { - s.InputFormat = &v - return s -} - -// SetInputFormatOptions sets the InputFormatOptions field's value. -func (s *ImportTableInput) SetInputFormatOptions(v *InputFormatOptions) *ImportTableInput { - s.InputFormatOptions = v - return s -} - -// SetS3BucketSource sets the S3BucketSource field's value. -func (s *ImportTableInput) SetS3BucketSource(v *S3BucketSource) *ImportTableInput { - s.S3BucketSource = v - return s -} - -// SetTableCreationParameters sets the TableCreationParameters field's value. -func (s *ImportTableInput) SetTableCreationParameters(v *TableCreationParameters) *ImportTableInput { - s.TableCreationParameters = v - return s -} - -type ImportTableOutput struct { - _ struct{} `type:"structure"` - - // Represents the properties of the table created for the import, and parameters - // of the import. The import parameters include import status, how many items - // were processed, and how many errors were encountered. - // - // ImportTableDescription is a required field - ImportTableDescription *ImportTableDescription `type:"structure" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ImportTableOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ImportTableOutput) GoString() string { - return s.String() -} - -// SetImportTableDescription sets the ImportTableDescription field's value. 
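// Editor's sketch (not part of the generated file): starting an import with an
// explicit ClientToken so retries inside the 8-hour window return the original
// import instead of starting a duplicate; because of the idempotencyToken tag,
// the SDK fills in a random token when none is supplied. Bucket, prefix, and
// table names are hypothetical; imports as in the earlier sketches.

func startImport(svc *dynamodb.DynamoDB) (*dynamodb.ImportTableOutput, error) {
	in := &dynamodb.ImportTableInput{
		ClientToken: aws.String("orders-import-2024-07-01"),
		InputFormat: aws.String(dynamodb.InputFormatDynamodbJson),
		S3BucketSource: &dynamodb.S3BucketSource{
			S3Bucket:    aws.String("my-import-bucket"),
			S3KeyPrefix: aws.String("exports/orders/"),
		},
		TableCreationParameters: &dynamodb.TableCreationParameters{
			TableName:   aws.String("orders"),
			BillingMode: aws.String(dynamodb.BillingModePayPerRequest),
			AttributeDefinitions: []*dynamodb.AttributeDefinition{
				{AttributeName: aws.String("pk"), AttributeType: aws.String(dynamodb.ScalarAttributeTypeS)},
			},
			KeySchema: []*dynamodb.KeySchemaElement{
				{AttributeName: aws.String("pk"), KeyType: aws.String(dynamodb.KeyTypeHash)},
			},
		},
	}
	// Surfaces the missing required fields (InputFormat, S3BucketSource,
	// TableCreationParameters) without a network round trip.
	if err := in.Validate(); err != nil {
		return nil, err
	}
	return svc.ImportTable(in)
}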
-func (s *ImportTableOutput) SetImportTableDescription(v *ImportTableDescription) *ImportTableOutput { - s.ImportTableDescription = v - return s -} - -// Optional object containing the parameters specific to an incremental export. -type IncrementalExportSpecification struct { - _ struct{} `type:"structure"` - - // Time in the past which provides the inclusive start range for the export - // table's data, counted in seconds from the start of the Unix epoch. The incremental - // export will reflect the table's state including and after this point in time. - ExportFromTime *time.Time `type:"timestamp"` - - // Time in the past which provides the exclusive end range for the export table's - // data, counted in seconds from the start of the Unix epoch. The incremental - // export will reflect the table's state just prior to this point in time. If - // this is not provided, the latest time with data available will be used. - ExportToTime *time.Time `type:"timestamp"` - - // The view type that was chosen for the export. Valid values are NEW_AND_OLD_IMAGES - // and NEW_IMAGES. The default value is NEW_AND_OLD_IMAGES. - ExportViewType *string `type:"string" enum:"ExportViewType"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s IncrementalExportSpecification) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s IncrementalExportSpecification) GoString() string { - return s.String() -} - -// SetExportFromTime sets the ExportFromTime field's value. -func (s *IncrementalExportSpecification) SetExportFromTime(v time.Time) *IncrementalExportSpecification { - s.ExportFromTime = &v - return s -} - -// SetExportToTime sets the ExportToTime field's value. -func (s *IncrementalExportSpecification) SetExportToTime(v time.Time) *IncrementalExportSpecification { - s.ExportToTime = &v - return s -} - -// SetExportViewType sets the ExportViewType field's value. -func (s *IncrementalExportSpecification) SetExportViewType(v string) *IncrementalExportSpecification { - s.ExportViewType = &v - return s -} - -// The operation tried to access a nonexistent index. -type IndexNotFoundException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s IndexNotFoundException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
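// Editor's sketch (not part of the generated file): requesting an incremental
// export of the last 24 hours via ExportTableToPointInTime. As documented above,
// ExportFromTime is inclusive and ExportToTime exclusive. The bucket name and
// window are hypothetical, and the export-type constants are assumed from a
// recent SDK release.

func startIncrementalExport(svc *dynamodb.DynamoDB, tableArn string) error {
	now := time.Now()
	_, err := svc.ExportTableToPointInTime(&dynamodb.ExportTableToPointInTimeInput{
		TableArn:     aws.String(tableArn),
		S3Bucket:     aws.String("my-export-bucket"),
		ExportFormat: aws.String(dynamodb.ExportFormatDynamodbJson),
		ExportType:   aws.String(dynamodb.ExportTypeIncrementalExport),
		IncrementalExportSpecification: &dynamodb.IncrementalExportSpecification{
			ExportFromTime: aws.Time(now.Add(-24 * time.Hour)), // inclusive lower bound
			ExportToTime:   aws.Time(now),                      // exclusive upper bound
			ExportViewType: aws.String(dynamodb.ExportViewTypeNewAndOldImages),
		},
	})
	return err
}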
-func (s IndexNotFoundException) GoString() string { - return s.String() -} - -func newErrorIndexNotFoundException(v protocol.ResponseMetadata) error { - return &IndexNotFoundException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *IndexNotFoundException) Code() string { - return "IndexNotFoundException" -} - -// Message returns the exception's message. -func (s *IndexNotFoundException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *IndexNotFoundException) OrigErr() error { - return nil -} - -func (s *IndexNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *IndexNotFoundException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *IndexNotFoundException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The format options for the data that was imported into the target table. -// There is one value, CsvOption. -type InputFormatOptions struct { - _ struct{} `type:"structure"` - - // The options for imported source files in CSV format. The values are Delimiter - // and HeaderList. - Csv *CsvOptions `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InputFormatOptions) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InputFormatOptions) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *InputFormatOptions) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "InputFormatOptions"} - if s.Csv != nil { - if err := s.Csv.Validate(); err != nil { - invalidParams.AddNested("Csv", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCsv sets the Csv field's value. -func (s *InputFormatOptions) SetCsv(v *CsvOptions) *InputFormatOptions { - s.Csv = v - return s -} - -// An error occurred on the server side. -type InternalServerError struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - // The server encountered an internal error trying to fulfill the request. - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InternalServerError) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s InternalServerError) GoString() string { - return s.String() -} - -func newErrorInternalServerError(v protocol.ResponseMetadata) error { - return &InternalServerError{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *InternalServerError) Code() string { - return "InternalServerError" -} - -// Message returns the exception's message. -func (s *InternalServerError) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InternalServerError) OrigErr() error { - return nil -} - -func (s *InternalServerError) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *InternalServerError) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *InternalServerError) RequestID() string { - return s.RespMetadata.RequestID -} - -// The specified ExportTime is outside of the point in time recovery window. -type InvalidExportTimeException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidExportTimeException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidExportTimeException) GoString() string { - return s.String() -} - -func newErrorInvalidExportTimeException(v protocol.ResponseMetadata) error { - return &InvalidExportTimeException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *InvalidExportTimeException) Code() string { - return "InvalidExportTimeException" -} - -// Message returns the exception's message. -func (s *InvalidExportTimeException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InvalidExportTimeException) OrigErr() error { - return nil -} - -func (s *InvalidExportTimeException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *InvalidExportTimeException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *InvalidExportTimeException) RequestID() string { - return s.RespMetadata.RequestID -} - -// An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime -// and LatestRestorableDateTime. -type InvalidRestoreTimeException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidRestoreTimeException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidRestoreTimeException) GoString() string { - return s.String() -} - -func newErrorInvalidRestoreTimeException(v protocol.ResponseMetadata) error { - return &InvalidRestoreTimeException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *InvalidRestoreTimeException) Code() string { - return "InvalidRestoreTimeException" -} - -// Message returns the exception's message. -func (s *InvalidRestoreTimeException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InvalidRestoreTimeException) OrigErr() error { - return nil -} - -func (s *InvalidRestoreTimeException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *InvalidRestoreTimeException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *InvalidRestoreTimeException) RequestID() string { - return s.RespMetadata.RequestID -} - -// Information about item collections, if any, that were affected by the operation. -// ItemCollectionMetrics is only returned if the request asked for it. If the -// table does not have any local secondary indexes, this information is not -// returned in the response. -type ItemCollectionMetrics struct { - _ struct{} `type:"structure"` - - // The partition key value of the item collection. This value is the same as - // the partition key value of the item. - ItemCollectionKey map[string]*AttributeValue `type:"map"` - - // An estimate of item collection size, in gigabytes. This value is a two-element - // array containing a lower bound and an upper bound for the estimate. The estimate - // includes the size of all the items in the table, plus the size of all attributes - // projected into all of the local secondary indexes on that table. Use this - // estimate to measure whether a local secondary index is approaching its size - // limit. - // - // The estimate is subject to change over time; therefore, do not rely on the - // precision or accuracy of the estimate. - SizeEstimateRangeGB []*float64 `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ItemCollectionMetrics) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s ItemCollectionMetrics) GoString() string { - return s.String() -} - -// SetItemCollectionKey sets the ItemCollectionKey field's value. -func (s *ItemCollectionMetrics) SetItemCollectionKey(v map[string]*AttributeValue) *ItemCollectionMetrics { - s.ItemCollectionKey = v - return s -} - -// SetSizeEstimateRangeGB sets the SizeEstimateRangeGB field's value. -func (s *ItemCollectionMetrics) SetSizeEstimateRangeGB(v []*float64) *ItemCollectionMetrics { - s.SizeEstimateRangeGB = v - return s -} - -// An item collection is too large. This exception is only returned for tables -// that have one or more local secondary indexes. -type ItemCollectionSizeLimitExceededException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - // The total size of an item collection has exceeded the maximum limit of 10 - // gigabytes. - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ItemCollectionSizeLimitExceededException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ItemCollectionSizeLimitExceededException) GoString() string { - return s.String() -} - -func newErrorItemCollectionSizeLimitExceededException(v protocol.ResponseMetadata) error { - return &ItemCollectionSizeLimitExceededException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *ItemCollectionSizeLimitExceededException) Code() string { - return "ItemCollectionSizeLimitExceededException" -} - -// Message returns the exception's message. -func (s *ItemCollectionSizeLimitExceededException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *ItemCollectionSizeLimitExceededException) OrigErr() error { - return nil -} - -func (s *ItemCollectionSizeLimitExceededException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *ItemCollectionSizeLimitExceededException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *ItemCollectionSizeLimitExceededException) RequestID() string { - return s.RespMetadata.RequestID -} - -// Details for the requested item. -type ItemResponse struct { - _ struct{} `type:"structure"` - - // Map of attribute data consisting of the data type and attribute value. - Item map[string]*AttributeValue `type:"map"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ItemResponse) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s ItemResponse) GoString() string { - return s.String() -} - -// SetItem sets the Item field's value. -func (s *ItemResponse) SetItem(v map[string]*AttributeValue) *ItemResponse { - s.Item = v - return s -} - -// Represents a single element of a key schema. A key schema specifies the attributes -// that make up the primary key of a table, or the key attributes of an index. -// -// A KeySchemaElement represents exactly one attribute of the primary key. For -// example, a simple primary key would be represented by one KeySchemaElement -// (for the partition key). A composite primary key would require one KeySchemaElement -// for the partition key, and another KeySchemaElement for the sort key. -// -// A KeySchemaElement must be a scalar, top-level attribute (not a nested attribute). -// The data type must be one of String, Number, or Binary. The attribute cannot -// be nested within a List or a Map. -type KeySchemaElement struct { - _ struct{} `type:"structure"` - - // The name of a key attribute. - // - // AttributeName is a required field - AttributeName *string `min:"1" type:"string" required:"true"` - - // The role that this key attribute will assume: - // - // * HASH - partition key - // - // * RANGE - sort key - // - // The partition key of an item is also known as its hash attribute. The term - // "hash attribute" derives from DynamoDB's usage of an internal hash function - // to evenly distribute data items across partitions, based on their partition - // key values. - // - // The sort key of an item is also known as its range attribute. The term "range - // attribute" derives from the way DynamoDB stores items with the same partition - // key physically close together, in sorted order by the sort key value. - // - // KeyType is a required field - KeyType *string `type:"string" required:"true" enum:"KeyType"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s KeySchemaElement) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s KeySchemaElement) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *KeySchemaElement) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "KeySchemaElement"} - if s.AttributeName == nil { - invalidParams.Add(request.NewErrParamRequired("AttributeName")) - } - if s.AttributeName != nil && len(*s.AttributeName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AttributeName", 1)) - } - if s.KeyType == nil { - invalidParams.Add(request.NewErrParamRequired("KeyType")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAttributeName sets the AttributeName field's value. -func (s *KeySchemaElement) SetAttributeName(v string) *KeySchemaElement { - s.AttributeName = &v - return s -} - -// SetKeyType sets the KeyType field's value. 
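// Editor's sketch (not part of the generated file): a composite primary key is
// exactly two KeySchemaElement values, the HASH element first, then RANGE. The
// attribute names are hypothetical; Validate enforces the minimum name length
// and the required KeyType locally.

func compositeKeySchema() ([]*dynamodb.KeySchemaElement, error) {
	schema := []*dynamodb.KeySchemaElement{
		{AttributeName: aws.String("Artist"), KeyType: aws.String(dynamodb.KeyTypeHash)},     // partition key
		{AttributeName: aws.String("SongTitle"), KeyType: aws.String(dynamodb.KeyTypeRange)}, // sort key
	}
	for _, e := range schema {
		if err := e.Validate(); err != nil {
			return nil, err
		}
	}
	return schema, nil
}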
-func (s *KeySchemaElement) SetKeyType(v string) *KeySchemaElement { - s.KeyType = &v - return s -} - -// Represents a set of primary keys and, for each key, the attributes to retrieve -// from the table. -// -// For each primary key, you must provide all of the key attributes. For example, -// with a simple primary key, you only need to provide the partition key. For -// a composite primary key, you must provide both the partition key and the -// sort key. -type KeysAndAttributes struct { - _ struct{} `type:"structure"` - - // This is a legacy parameter. Use ProjectionExpression instead. For more information, - // see Legacy Conditional Parameters (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html) - // in the Amazon DynamoDB Developer Guide. - AttributesToGet []*string `min:"1" type:"list"` - - // The consistency of a read operation. If set to true, then a strongly consistent - // read is used; otherwise, an eventually consistent read is used. - ConsistentRead *bool `type:"boolean"` - - // One or more substitution tokens for attribute names in an expression. The - // following are some use cases for using ExpressionAttributeNames: - // - // * To access an attribute whose name conflicts with a DynamoDB reserved - // word. - // - // * To create a placeholder for repeating occurrences of an attribute name - // in an expression. - // - // * To prevent special characters in an attribute name from being misinterpreted - // in an expression. - // - // Use the # character in an expression to dereference an attribute name. For - // example, consider the following attribute name: - // - // * Percentile - // - // The name of this attribute conflicts with a reserved word, so it cannot be - // used directly in an expression. (For the complete list of reserved words, - // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) - // in the Amazon DynamoDB Developer Guide). To work around this, you could specify - // the following for ExpressionAttributeNames: - // - // * {"#P":"Percentile"} - // - // You could then use this substitution in an expression, as in this example: - // - // * #P = :val - // - // Tokens that begin with the : character are expression attribute values, which - // are placeholders for the actual value at runtime. - // - // For more information on expression attribute names, see Accessing Item Attributes - // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. - ExpressionAttributeNames map[string]*string `type:"map"` - - // The primary key attribute values that define the items and the attributes - // associated with the items. - // - // Keys is a required field - Keys []map[string]*AttributeValue `min:"1" type:"list" required:"true"` - - // A string that identifies one or more attributes to retrieve from the table. - // These attributes can include scalars, sets, or elements of a JSON document. - // The attributes in the ProjectionExpression must be separated by commas. - // - // If no attribute names are specified, then all attributes will be returned. - // If any of the requested attributes are not found, they will not appear in - // the result. - // - // For more information, see Accessing Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. 
- ProjectionExpression *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s KeysAndAttributes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s KeysAndAttributes) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *KeysAndAttributes) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "KeysAndAttributes"} - if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AttributesToGet", 1)) - } - if s.Keys == nil { - invalidParams.Add(request.NewErrParamRequired("Keys")) - } - if s.Keys != nil && len(s.Keys) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Keys", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAttributesToGet sets the AttributesToGet field's value. -func (s *KeysAndAttributes) SetAttributesToGet(v []*string) *KeysAndAttributes { - s.AttributesToGet = v - return s -} - -// SetConsistentRead sets the ConsistentRead field's value. -func (s *KeysAndAttributes) SetConsistentRead(v bool) *KeysAndAttributes { - s.ConsistentRead = &v - return s -} - -// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. -func (s *KeysAndAttributes) SetExpressionAttributeNames(v map[string]*string) *KeysAndAttributes { - s.ExpressionAttributeNames = v - return s -} - -// SetKeys sets the Keys field's value. -func (s *KeysAndAttributes) SetKeys(v []map[string]*AttributeValue) *KeysAndAttributes { - s.Keys = v - return s -} - -// SetProjectionExpression sets the ProjectionExpression field's value. -func (s *KeysAndAttributes) SetProjectionExpression(v string) *KeysAndAttributes { - s.ProjectionExpression = &v - return s -} - -// Describes a Kinesis data stream destination. -type KinesisDataStreamDestination struct { - _ struct{} `type:"structure"` - - // The precision of the Kinesis data stream timestamp. The values are either - // MILLISECOND or MICROSECOND. - ApproximateCreationDateTimePrecision *string `type:"string" enum:"ApproximateCreationDateTimePrecision"` - - // The current status of replication. - DestinationStatus *string `type:"string" enum:"DestinationStatus"` - - // The human-readable string that corresponds to the replica status. - DestinationStatusDescription *string `type:"string"` - - // The ARN for a specific Kinesis data stream. - StreamArn *string `min:"37" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s KinesisDataStreamDestination) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
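A minimal sketch of how the KeysAndAttributes type above is consumed by BatchGetItem; the "Music" table, its Artist/SongTitle key, and the Percentile attribute are hypothetical, chosen to mirror the reserved-word example in the doc comment:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))
	out, err := svc.BatchGetItem(&dynamodb.BatchGetItemInput{
		RequestItems: map[string]*dynamodb.KeysAndAttributes{
			"Music": {
				Keys: []map[string]*dynamodb.AttributeValue{{
					"Artist":    {S: aws.String("No One You Know")},
					"SongTitle": {S: aws.String("Call Me Today")},
				}},
				// #P is substituted for the reserved word "Percentile".
				ProjectionExpression:     aws.String("Artist, SongTitle, #P"),
				ExpressionAttributeNames: map[string]*string{"#P": aws.String("Percentile")},
				ConsistentRead:           aws.Bool(true),
			},
		},
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(out.Responses["Music"])
}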
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s KinesisDataStreamDestination) GoString() string {
-	return s.String()
-}
-
-// SetApproximateCreationDateTimePrecision sets the ApproximateCreationDateTimePrecision field's value.
-func (s *KinesisDataStreamDestination) SetApproximateCreationDateTimePrecision(v string) *KinesisDataStreamDestination {
-	s.ApproximateCreationDateTimePrecision = &v
-	return s
-}
-
-// SetDestinationStatus sets the DestinationStatus field's value.
-func (s *KinesisDataStreamDestination) SetDestinationStatus(v string) *KinesisDataStreamDestination {
-	s.DestinationStatus = &v
-	return s
-}
-
-// SetDestinationStatusDescription sets the DestinationStatusDescription field's value.
-func (s *KinesisDataStreamDestination) SetDestinationStatusDescription(v string) *KinesisDataStreamDestination {
-	s.DestinationStatusDescription = &v
-	return s
-}
-
-// SetStreamArn sets the StreamArn field's value.
-func (s *KinesisDataStreamDestination) SetStreamArn(v string) *KinesisDataStreamDestination {
-	s.StreamArn = &v
-	return s
-}
-
-// There is no limit to the number of daily on-demand backups that can be taken.
-//
-// For most purposes, up to 500 simultaneous table operations are allowed per
-// account. These operations include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive,
-// RestoreTableFromBackup, and RestoreTableToPointInTime.
-//
-// When you are creating a table with one or more secondary indexes, you can
-// have up to 250 such requests running at a time. However, if the table or
-// index specifications are complex, then DynamoDB might temporarily reduce
-// the number of concurrent operations.
-//
-// When importing into DynamoDB, up to 50 simultaneous import table operations
-// are allowed per account.
-//
-// There is a soft account quota of 2,500 tables.
-//
-// GetRecords was called with a value of more than 1000 for the limit request
-// parameter.
-//
-// More than 2 processes are reading from the same streams shard at the same
-// time. Exceeding this limit may result in request throttling.
-type LimitExceededException struct {
-	_ struct{} `type:"structure"`
-	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
-	// Too many operations for a given subscriber.
-	Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LimitExceededException) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LimitExceededException) GoString() string {
-	return s.String()
-}
-
-func newErrorLimitExceededException(v protocol.ResponseMetadata) error {
-	return &LimitExceededException{
-		RespMetadata: v,
-	}
-}
-
-// Code returns the exception type name.
-func (s *LimitExceededException) Code() string {
-	return "LimitExceededException"
-}
-
-// Message returns the exception's message.
-func (s *LimitExceededException) Message() string {
-	if s.Message_ != nil {
-		return *s.Message_
-	}
-	return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *LimitExceededException) OrigErr() error {
-	return nil
-}
-
-func (s *LimitExceededException) Error() string {
-	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *LimitExceededException) StatusCode() int {
-	return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *LimitExceededException) RequestID() string {
-	return s.RespMetadata.RequestID
-}
-
-type ListBackupsInput struct {
-	_ struct{} `type:"structure"`
-
-	// The backups from the table specified by BackupType are listed.
-	//
-	// Where BackupType can be:
-	//
-	// * USER - On-demand backup created by you. (The default setting if no other
-	// backup types are specified.)
-	//
-	// * SYSTEM - On-demand backup automatically created by DynamoDB.
-	//
-	// * ALL - All types of on-demand backups (USER and SYSTEM).
-	BackupType *string `type:"string" enum:"BackupTypeFilter"`
-
-	// LastEvaluatedBackupArn is the Amazon Resource Name (ARN) of the backup last
-	// evaluated when the current page of results was returned, inclusive of the
-	// current page of results. This value may be specified as the ExclusiveStartBackupArn
-	// of a new ListBackups operation in order to fetch the next page of results.
-	ExclusiveStartBackupArn *string `min:"37" type:"string"`
-
-	// Maximum number of backups to return at once.
-	Limit *int64 `min:"1" type:"integer"`
-
-	// Lists the backups from the table specified in TableName. You can also provide
-	// the Amazon Resource Name (ARN) of the table in this parameter.
-	TableName *string `min:"1" type:"string"`
-
-	// Only backups created after this time are listed. TimeRangeLowerBound is inclusive.
-	TimeRangeLowerBound *time.Time `type:"timestamp"`
-
-	// Only backups created before this time are listed. TimeRangeUpperBound is
-	// exclusive.
-	TimeRangeUpperBound *time.Time `type:"timestamp"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListBackupsInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListBackupsInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ListBackupsInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "ListBackupsInput"}
-	if s.ExclusiveStartBackupArn != nil && len(*s.ExclusiveStartBackupArn) < 37 {
-		invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartBackupArn", 37))
-	}
-	if s.Limit != nil && *s.Limit < 1 {
-		invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
-	}
-	if s.TableName != nil && len(*s.TableName) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetBackupType sets the BackupType field's value.
-func (s *ListBackupsInput) SetBackupType(v string) *ListBackupsInput {
-	s.BackupType = &v
-	return s
-}
-
-// SetExclusiveStartBackupArn sets the ExclusiveStartBackupArn field's value.
-func (s *ListBackupsInput) SetExclusiveStartBackupArn(v string) *ListBackupsInput {
-	s.ExclusiveStartBackupArn = &v
-	return s
-}
-
-// SetLimit sets the Limit field's value.
-func (s *ListBackupsInput) SetLimit(v int64) *ListBackupsInput {
-	s.Limit = &v
-	return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *ListBackupsInput) SetTableName(v string) *ListBackupsInput {
-	s.TableName = &v
-	return s
-}
-
-// SetTimeRangeLowerBound sets the TimeRangeLowerBound field's value.
-func (s *ListBackupsInput) SetTimeRangeLowerBound(v time.Time) *ListBackupsInput {
-	s.TimeRangeLowerBound = &v
-	return s
-}
-
-// SetTimeRangeUpperBound sets the TimeRangeUpperBound field's value.
-func (s *ListBackupsInput) SetTimeRangeUpperBound(v time.Time) *ListBackupsInput {
-	s.TimeRangeUpperBound = &v
-	return s
-}
-
-type ListBackupsOutput struct {
-	_ struct{} `type:"structure"`
-
-	// List of BackupSummary objects.
-	BackupSummaries []*BackupSummary `type:"list"`
-
-	// The ARN of the backup last evaluated when the current page of results was
-	// returned, inclusive of the current page of results. This value may be specified
-	// as the ExclusiveStartBackupArn of a new ListBackups operation in order to
-	// fetch the next page of results.
-	//
-	// If LastEvaluatedBackupArn is empty, then the last page of results has been
-	// processed and there are no more results to be retrieved.
-	//
-	// If LastEvaluatedBackupArn is not empty, this may or may not indicate that
-	// there is more data to be returned. All results are guaranteed to have been
-	// returned if and only if no value for LastEvaluatedBackupArn is returned.
-	LastEvaluatedBackupArn *string `min:"37" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListBackupsOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListBackupsOutput) GoString() string {
-	return s.String()
-}
-
-// SetBackupSummaries sets the BackupSummaries field's value.
-func (s *ListBackupsOutput) SetBackupSummaries(v []*BackupSummary) *ListBackupsOutput {
-	s.BackupSummaries = v
-	return s
-}
-
-// SetLastEvaluatedBackupArn sets the LastEvaluatedBackupArn field's value.
-func (s *ListBackupsOutput) SetLastEvaluatedBackupArn(v string) *ListBackupsOutput {
-	s.LastEvaluatedBackupArn = &v
-	return s
-}
-
-type ListContributorInsightsInput struct {
-	_ struct{} `type:"structure"`
-
-	// Maximum number of results to return per page.
-	MaxResults *int64 `type:"integer"`
-
-	// A token for the desired page, if there is one.
-	NextToken *string `type:"string"`
-
-	// The name of the table. You can also provide the Amazon Resource Name (ARN)
-	// of the table in this parameter.
-	TableName *string `min:"1" type:"string"`
-}
-
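A sketch of the ListBackups pagination contract documented above: feed LastEvaluatedBackupArn back in as ExclusiveStartBackupArn until it comes back empty, backing off briefly if the account-level LimitExceededException is returned. The "Music" table name is hypothetical:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))
	in := &dynamodb.ListBackupsInput{
		TableName:  aws.String("Music"), // hypothetical table
		BackupType: aws.String(dynamodb.BackupTypeFilterUser),
		Limit:      aws.Int64(25),
	}
	for {
		out, err := svc.ListBackups(in)
		if err != nil {
			// Crude backoff on throttling; tune for real use.
			if aerr, ok := err.(awserr.Error); ok && aerr.Code() == dynamodb.ErrCodeLimitExceededException {
				time.Sleep(time.Second)
				continue
			}
			fmt.Println(err)
			return
		}
		for _, b := range out.BackupSummaries {
			fmt.Println(aws.StringValue(b.BackupArn))
		}
		// An empty LastEvaluatedBackupArn means the last page was processed.
		if out.LastEvaluatedBackupArn == nil {
			return
		}
		in.ExclusiveStartBackupArn = out.LastEvaluatedBackupArn
	}
}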
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListContributorInsightsInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListContributorInsightsInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ListContributorInsightsInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "ListContributorInsightsInput"}
-	if s.TableName != nil && len(*s.TableName) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetMaxResults sets the MaxResults field's value.
-func (s *ListContributorInsightsInput) SetMaxResults(v int64) *ListContributorInsightsInput {
-	s.MaxResults = &v
-	return s
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ListContributorInsightsInput) SetNextToken(v string) *ListContributorInsightsInput {
-	s.NextToken = &v
-	return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *ListContributorInsightsInput) SetTableName(v string) *ListContributorInsightsInput {
-	s.TableName = &v
-	return s
-}
-
-type ListContributorInsightsOutput struct {
-	_ struct{} `type:"structure"`
-
-	// A list of ContributorInsightsSummary.
-	ContributorInsightsSummaries []*ContributorInsightsSummary `type:"list"`
-
-	// A token to go to the next page if there is one.
-	NextToken *string `type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListContributorInsightsOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListContributorInsightsOutput) GoString() string {
-	return s.String()
-}
-
-// SetContributorInsightsSummaries sets the ContributorInsightsSummaries field's value.
-func (s *ListContributorInsightsOutput) SetContributorInsightsSummaries(v []*ContributorInsightsSummary) *ListContributorInsightsOutput {
-	s.ContributorInsightsSummaries = v
-	return s
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ListContributorInsightsOutput) SetNextToken(v string) *ListContributorInsightsOutput {
-	s.NextToken = &v
-	return s
-}
-
-type ListExportsInput struct {
-	_ struct{} `type:"structure"`
-
-	// Maximum number of results to return per page.
-	MaxResults *int64 `min:"1" type:"integer"`
-
-	// An optional string that, if supplied, must be copied from the output of a
-	// previous call to ListExports. When provided in this manner, the API fetches
-	// the next page of results.
-	NextToken *string `type:"string"`
-
-	// The Amazon Resource Name (ARN) associated with the exported table.
-	TableArn *string `min:"1" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListExportsInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListExportsInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ListExportsInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "ListExportsInput"}
-	if s.MaxResults != nil && *s.MaxResults < 1 {
-		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
-	}
-	if s.TableArn != nil && len(*s.TableArn) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("TableArn", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetMaxResults sets the MaxResults field's value.
-func (s *ListExportsInput) SetMaxResults(v int64) *ListExportsInput {
-	s.MaxResults = &v
-	return s
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ListExportsInput) SetNextToken(v string) *ListExportsInput {
-	s.NextToken = &v
-	return s
-}
-
-// SetTableArn sets the TableArn field's value.
-func (s *ListExportsInput) SetTableArn(v string) *ListExportsInput {
-	s.TableArn = &v
-	return s
-}
-
-type ListExportsOutput struct {
-	_ struct{} `type:"structure"`
-
-	// A list of ExportSummary objects.
-	ExportSummaries []*ExportSummary `type:"list"`
-
-	// If this value is returned, there are additional results to be displayed.
-	// To retrieve them, call ListExports again, with NextToken set to this value.
-	NextToken *string `type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListExportsOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListExportsOutput) GoString() string {
-	return s.String()
-}
-
-// SetExportSummaries sets the ExportSummaries field's value.
-func (s *ListExportsOutput) SetExportSummaries(v []*ExportSummary) *ListExportsOutput {
-	s.ExportSummaries = v
-	return s
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ListExportsOutput) SetNextToken(v string) *ListExportsOutput {
-	s.NextToken = &v
-	return s
-}
-
-type ListGlobalTablesInput struct {
-	_ struct{} `type:"structure"`
-
-	// The first global table name that this operation will evaluate.
-	ExclusiveStartGlobalTableName *string `min:"3" type:"string"`
-
-	// The maximum number of table names to return. If this parameter is not specified,
-	// DynamoDB defaults to 100.
-	//
-	// If the number of global tables DynamoDB finds reaches this limit, it stops
-	// the operation and returns the table names collected up to that point, with
-	// a table name in the LastEvaluatedGlobalTableName to apply in a subsequent
-	// operation to the ExclusiveStartGlobalTableName parameter.
-	Limit *int64 `min:"1" type:"integer"`
-
-	// Lists the global tables in a specific Region.
-	RegionName *string `type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListGlobalTablesInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListGlobalTablesInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ListGlobalTablesInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "ListGlobalTablesInput"}
-	if s.ExclusiveStartGlobalTableName != nil && len(*s.ExclusiveStartGlobalTableName) < 3 {
-		invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartGlobalTableName", 3))
-	}
-	if s.Limit != nil && *s.Limit < 1 {
-		invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetExclusiveStartGlobalTableName sets the ExclusiveStartGlobalTableName field's value.
-func (s *ListGlobalTablesInput) SetExclusiveStartGlobalTableName(v string) *ListGlobalTablesInput {
-	s.ExclusiveStartGlobalTableName = &v
-	return s
-}
-
-// SetLimit sets the Limit field's value.
-func (s *ListGlobalTablesInput) SetLimit(v int64) *ListGlobalTablesInput {
-	s.Limit = &v
-	return s
-}
-
-// SetRegionName sets the RegionName field's value.
-func (s *ListGlobalTablesInput) SetRegionName(v string) *ListGlobalTablesInput {
-	s.RegionName = &v
-	return s
-}
-
-type ListGlobalTablesOutput struct {
-	_ struct{} `type:"structure"`
-
-	// List of global table names.
-	GlobalTables []*GlobalTable `type:"list"`
-
-	// Last evaluated global table name.
-	LastEvaluatedGlobalTableName *string `min:"3" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListGlobalTablesOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListGlobalTablesOutput) GoString() string {
-	return s.String()
-}
-
-// SetGlobalTables sets the GlobalTables field's value.
-func (s *ListGlobalTablesOutput) SetGlobalTables(v []*GlobalTable) *ListGlobalTablesOutput {
-	s.GlobalTables = v
-	return s
-}
-
-// SetLastEvaluatedGlobalTableName sets the LastEvaluatedGlobalTableName field's value.
-func (s *ListGlobalTablesOutput) SetLastEvaluatedGlobalTableName(v string) *ListGlobalTablesOutput {
-	s.LastEvaluatedGlobalTableName = &v
-	return s
-}
-
-type ListImportsInput struct {
-	_ struct{} `type:"structure"`
-
-	// An optional string that, if supplied, must be copied from the output of a
-	// previous call to ListImports. When provided in this manner, the API fetches
-	// the next page of results.
-	NextToken *string `min:"112" type:"string"`
-
-	// The number of ImportSummary objects returned in a single page.
-	PageSize *int64 `min:"1" type:"integer"`
-
-	// The Amazon Resource Name (ARN) associated with the table that was imported
-	// to.
-	TableArn *string `min:"1" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListImportsInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListImportsInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ListImportsInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "ListImportsInput"}
-	if s.NextToken != nil && len(*s.NextToken) < 112 {
-		invalidParams.Add(request.NewErrParamMinLen("NextToken", 112))
-	}
-	if s.PageSize != nil && *s.PageSize < 1 {
-		invalidParams.Add(request.NewErrParamMinValue("PageSize", 1))
-	}
-	if s.TableArn != nil && len(*s.TableArn) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("TableArn", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ListImportsInput) SetNextToken(v string) *ListImportsInput {
-	s.NextToken = &v
-	return s
-}
-
-// SetPageSize sets the PageSize field's value.
-func (s *ListImportsInput) SetPageSize(v int64) *ListImportsInput {
-	s.PageSize = &v
-	return s
-}
-
-// SetTableArn sets the TableArn field's value.
-func (s *ListImportsInput) SetTableArn(v string) *ListImportsInput {
-	s.TableArn = &v
-	return s
-}
-
-type ListImportsOutput struct {
-	_ struct{} `type:"structure"`
-
-	// A list of ImportSummary objects.
-	ImportSummaryList []*ImportSummary `type:"list"`
-
-	// If this value is returned, there are additional results to be displayed.
-	// To retrieve them, call ListImports again, with NextToken set to this value.
-	NextToken *string `min:"112" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListImportsOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListImportsOutput) GoString() string {
-	return s.String()
-}
-
-// SetImportSummaryList sets the ImportSummaryList field's value.
-func (s *ListImportsOutput) SetImportSummaryList(v []*ImportSummary) *ListImportsOutput {
-	s.ImportSummaryList = v
-	return s
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ListImportsOutput) SetNextToken(v string) *ListImportsOutput {
-	s.NextToken = &v
-	return s
-}
-
-// Represents the input of a ListTables operation.
-type ListTablesInput struct {
-	_ struct{} `type:"structure"`
-
-	// The first table name that this operation will evaluate. Use the value that
-	// was returned for LastEvaluatedTableName in a previous operation, so that
-	// you can obtain the next page of results.
-	ExclusiveStartTableName *string `min:"3" type:"string"`
-
-	// A maximum number of table names to return. If this parameter is not specified,
-	// the limit is 100.
-	Limit *int64 `min:"1" type:"integer"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListTablesInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListTablesInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ListTablesInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "ListTablesInput"}
-	if s.ExclusiveStartTableName != nil && len(*s.ExclusiveStartTableName) < 3 {
-		invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartTableName", 3))
-	}
-	if s.Limit != nil && *s.Limit < 1 {
-		invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetExclusiveStartTableName sets the ExclusiveStartTableName field's value.
-func (s *ListTablesInput) SetExclusiveStartTableName(v string) *ListTablesInput {
-	s.ExclusiveStartTableName = &v
-	return s
-}
-
-// SetLimit sets the Limit field's value.
-func (s *ListTablesInput) SetLimit(v int64) *ListTablesInput {
-	s.Limit = &v
-	return s
-}
-
-// Represents the output of a ListTables operation.
-type ListTablesOutput struct {
-	_ struct{} `type:"structure"`
-
-	// The name of the last table in the current page of results. Use this value
-	// as the ExclusiveStartTableName in a new request to obtain the next page of
-	// results, until all the table names are returned.
-	//
-	// If you do not receive a LastEvaluatedTableName value in the response, this
-	// means that there are no more table names to be retrieved.
-	LastEvaluatedTableName *string `min:"3" type:"string"`
-
-	// The names of the tables associated with the current account at the current
-	// endpoint. The maximum size of this array is 100.
-	//
-	// If LastEvaluatedTableName also appears in the output, you can use this value
-	// as the ExclusiveStartTableName parameter in a subsequent ListTables request
-	// and obtain the next page of results.
-	TableNames []*string `type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListTablesOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListTablesOutput) GoString() string {
-	return s.String()
-}
-
-// SetLastEvaluatedTableName sets the LastEvaluatedTableName field's value.
-func (s *ListTablesOutput) SetLastEvaluatedTableName(v string) *ListTablesOutput {
-	s.LastEvaluatedTableName = &v
-	return s
-}
-
-// SetTableNames sets the TableNames field's value.
-func (s *ListTablesOutput) SetTableNames(v []*string) *ListTablesOutput {
-	s.TableNames = v
-	return s
-}
-
-type ListTagsOfResourceInput struct {
-	_ struct{} `type:"structure"`
-
-	// An optional string that, if supplied, must be copied from the output of a
-	// previous call to ListTagsOfResource. When provided in this manner, this API
-	// fetches the next page of results.
-	NextToken *string `type:"string"`
-
-	// The Amazon DynamoDB resource with tags to be listed. This value is an Amazon
-	// Resource Name (ARN).
-	//
-	// ResourceArn is a required field
-	ResourceArn *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListTagsOfResourceInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListTagsOfResourceInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ListTagsOfResourceInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "ListTagsOfResourceInput"}
-	if s.ResourceArn == nil {
-		invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
-	}
-	if s.ResourceArn != nil && len(*s.ResourceArn) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ListTagsOfResourceInput) SetNextToken(v string) *ListTagsOfResourceInput {
-	s.NextToken = &v
-	return s
-}
-
-// SetResourceArn sets the ResourceArn field's value.
-func (s *ListTagsOfResourceInput) SetResourceArn(v string) *ListTagsOfResourceInput {
-	s.ResourceArn = &v
-	return s
-}
-
-type ListTagsOfResourceOutput struct {
-	_ struct{} `type:"structure"`
-
-	// If this value is returned, there are additional results to be displayed.
-	// To retrieve them, call ListTagsOfResource again, with NextToken set to this
-	// value.
-	NextToken *string `type:"string"`
-
-	// The tags currently associated with the Amazon DynamoDB resource.
-	Tags []*Tag `type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListTagsOfResourceOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
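The same LastEvaluated/ExclusiveStart handshake documented for ListTablesInput and ListTablesOutput above also has a generated helper, ListTablesPages, which hides the loop. A minimal sketch:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))
	// ListTablesPages feeds LastEvaluatedTableName back in as
	// ExclusiveStartTableName until the final page is reached.
	err := svc.ListTablesPages(&dynamodb.ListTablesInput{Limit: aws.Int64(100)},
		func(page *dynamodb.ListTablesOutput, lastPage bool) bool {
			for _, name := range page.TableNames {
				fmt.Println(aws.StringValue(name))
			}
			return true // keep paging
		})
	if err != nil {
		fmt.Println(err)
	}
}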
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListTagsOfResourceOutput) GoString() string {
-	return s.String()
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ListTagsOfResourceOutput) SetNextToken(v string) *ListTagsOfResourceOutput {
-	s.NextToken = &v
-	return s
-}
-
-// SetTags sets the Tags field's value.
-func (s *ListTagsOfResourceOutput) SetTags(v []*Tag) *ListTagsOfResourceOutput {
-	s.Tags = v
-	return s
-}
-
-// Represents the properties of a local secondary index.
-type LocalSecondaryIndex struct {
-	_ struct{} `type:"structure"`
-
-	// The name of the local secondary index. The name must be unique among all
-	// other indexes on this table.
-	//
-	// IndexName is a required field
-	IndexName *string `min:"3" type:"string" required:"true"`
-
-	// The complete key schema for the local secondary index, consisting of one
-	// or more pairs of attribute names and key types:
-	//
-	// * HASH - partition key
-	//
-	// * RANGE - sort key
-	//
-	// The partition key of an item is also known as its hash attribute. The term
-	// "hash attribute" derives from DynamoDB's usage of an internal hash function
-	// to evenly distribute data items across partitions, based on their partition
-	// key values.
-	//
-	// The sort key of an item is also known as its range attribute. The term "range
-	// attribute" derives from the way DynamoDB stores items with the same partition
-	// key physically close together, in sorted order by the sort key value.
-	//
-	// KeySchema is a required field
-	KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"`
-
-	// Represents attributes that are copied (projected) from the table into the
-	// local secondary index. These are in addition to the primary key attributes
-	// and index key attributes, which are automatically projected.
-	//
-	// Projection is a required field
-	Projection *Projection `type:"structure" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LocalSecondaryIndex) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LocalSecondaryIndex) GoString() string {
-	return s.String()
-}
-
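A sketch of wiring LocalSecondaryIndex, KeySchemaElement, and Projection together at table-creation time; the "Music" table and its attribute names are hypothetical:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))
	_, err := svc.CreateTable(&dynamodb.CreateTableInput{
		TableName:   aws.String("Music"), // hypothetical
		BillingMode: aws.String(dynamodb.BillingModePayPerRequest),
		AttributeDefinitions: []*dynamodb.AttributeDefinition{
			{AttributeName: aws.String("Artist"), AttributeType: aws.String(dynamodb.ScalarAttributeTypeS)},
			{AttributeName: aws.String("SongTitle"), AttributeType: aws.String(dynamodb.ScalarAttributeTypeS)},
			{AttributeName: aws.String("AlbumTitle"), AttributeType: aws.String(dynamodb.ScalarAttributeTypeS)},
		},
		// Base table: composite primary key (partition + sort).
		KeySchema: []*dynamodb.KeySchemaElement{
			{AttributeName: aws.String("Artist"), KeyType: aws.String(dynamodb.KeyTypeHash)},
			{AttributeName: aws.String("SongTitle"), KeyType: aws.String(dynamodb.KeyTypeRange)},
		},
		// The LSI keeps the partition key and swaps in a different sort key.
		LocalSecondaryIndexes: []*dynamodb.LocalSecondaryIndex{{
			IndexName: aws.String("AlbumTitleIndex"),
			KeySchema: []*dynamodb.KeySchemaElement{
				{AttributeName: aws.String("Artist"), KeyType: aws.String(dynamodb.KeyTypeHash)},
				{AttributeName: aws.String("AlbumTitle"), KeyType: aws.String(dynamodb.KeyTypeRange)},
			},
			Projection: &dynamodb.Projection{ProjectionType: aws.String(dynamodb.ProjectionTypeKeysOnly)},
		}},
	})
	if err != nil {
		fmt.Println(err)
	}
}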
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *LocalSecondaryIndex) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "LocalSecondaryIndex"}
-	if s.IndexName == nil {
-		invalidParams.Add(request.NewErrParamRequired("IndexName"))
-	}
-	if s.IndexName != nil && len(*s.IndexName) < 3 {
-		invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
-	}
-	if s.KeySchema == nil {
-		invalidParams.Add(request.NewErrParamRequired("KeySchema"))
-	}
-	if s.KeySchema != nil && len(s.KeySchema) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1))
-	}
-	if s.Projection == nil {
-		invalidParams.Add(request.NewErrParamRequired("Projection"))
-	}
-	if s.KeySchema != nil {
-		for i, v := range s.KeySchema {
-			if v == nil {
-				continue
-			}
-			if err := v.Validate(); err != nil {
-				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams))
-			}
-		}
-	}
-	if s.Projection != nil {
-		if err := s.Projection.Validate(); err != nil {
-			invalidParams.AddNested("Projection", err.(request.ErrInvalidParams))
-		}
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetIndexName sets the IndexName field's value.
-func (s *LocalSecondaryIndex) SetIndexName(v string) *LocalSecondaryIndex {
-	s.IndexName = &v
-	return s
-}
-
-// SetKeySchema sets the KeySchema field's value.
-func (s *LocalSecondaryIndex) SetKeySchema(v []*KeySchemaElement) *LocalSecondaryIndex {
-	s.KeySchema = v
-	return s
-}
-
-// SetProjection sets the Projection field's value.
-func (s *LocalSecondaryIndex) SetProjection(v *Projection) *LocalSecondaryIndex {
-	s.Projection = v
-	return s
-}
-
-// Represents the properties of a local secondary index.
-type LocalSecondaryIndexDescription struct {
-	_ struct{} `type:"structure"`
-
-	// The Amazon Resource Name (ARN) that uniquely identifies the index.
-	IndexArn *string `type:"string"`
-
-	// Represents the name of the local secondary index.
-	IndexName *string `min:"3" type:"string"`
-
-	// The total size of the specified index, in bytes. DynamoDB updates this value
-	// approximately every six hours. Recent changes might not be reflected in this
-	// value.
-	IndexSizeBytes *int64 `type:"long"`
-
-	// The number of items in the specified index. DynamoDB updates this value approximately
-	// every six hours. Recent changes might not be reflected in this value.
-	ItemCount *int64 `type:"long"`
-
-	// The complete key schema for the local secondary index, consisting of one
-	// or more pairs of attribute names and key types:
-	//
-	// * HASH - partition key
-	//
-	// * RANGE - sort key
-	//
-	// The partition key of an item is also known as its hash attribute. The term
-	// "hash attribute" derives from DynamoDB's usage of an internal hash function
-	// to evenly distribute data items across partitions, based on their partition
-	// key values.
-	//
-	// The sort key of an item is also known as its range attribute. The term "range
-	// attribute" derives from the way DynamoDB stores items with the same partition
-	// key physically close together, in sorted order by the sort key value.
-	KeySchema []*KeySchemaElement `min:"1" type:"list"`
-
-	// Represents attributes that are copied (projected) from the table into the
-	// local secondary index. These are in addition to the primary key attributes
-	// and index key attributes, which are automatically projected.
-	Projection *Projection `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LocalSecondaryIndexDescription) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LocalSecondaryIndexDescription) GoString() string {
-	return s.String()
-}
-
-// SetIndexArn sets the IndexArn field's value.
-func (s *LocalSecondaryIndexDescription) SetIndexArn(v string) *LocalSecondaryIndexDescription {
-	s.IndexArn = &v
-	return s
-}
-
-// SetIndexName sets the IndexName field's value.
-func (s *LocalSecondaryIndexDescription) SetIndexName(v string) *LocalSecondaryIndexDescription {
-	s.IndexName = &v
-	return s
-}
-
-// SetIndexSizeBytes sets the IndexSizeBytes field's value.
-func (s *LocalSecondaryIndexDescription) SetIndexSizeBytes(v int64) *LocalSecondaryIndexDescription {
-	s.IndexSizeBytes = &v
-	return s
-}
-
-// SetItemCount sets the ItemCount field's value.
-func (s *LocalSecondaryIndexDescription) SetItemCount(v int64) *LocalSecondaryIndexDescription {
-	s.ItemCount = &v
-	return s
-}
-
-// SetKeySchema sets the KeySchema field's value.
-func (s *LocalSecondaryIndexDescription) SetKeySchema(v []*KeySchemaElement) *LocalSecondaryIndexDescription {
-	s.KeySchema = v
-	return s
-}
-
-// SetProjection sets the Projection field's value.
-func (s *LocalSecondaryIndexDescription) SetProjection(v *Projection) *LocalSecondaryIndexDescription {
-	s.Projection = v
-	return s
-}
-
-// Represents the properties of a local secondary index for the table when the
-// backup was created.
-type LocalSecondaryIndexInfo struct {
-	_ struct{} `type:"structure"`
-
-	// Represents the name of the local secondary index.
-	IndexName *string `min:"3" type:"string"`
-
-	// The complete key schema for a local secondary index, which consists of one
-	// or more pairs of attribute names and key types:
-	//
-	// * HASH - partition key
-	//
-	// * RANGE - sort key
-	//
-	// The partition key of an item is also known as its hash attribute. The term
-	// "hash attribute" derives from DynamoDB's usage of an internal hash function
-	// to evenly distribute data items across partitions, based on their partition
-	// key values.
-	//
-	// The sort key of an item is also known as its range attribute. The term "range
-	// attribute" derives from the way DynamoDB stores items with the same partition
-	// key physically close together, in sorted order by the sort key value.
-	KeySchema []*KeySchemaElement `min:"1" type:"list"`
-
-	// Represents attributes that are copied (projected) from the table into the
-	// local secondary index. These are in addition to the primary key attributes
-	// and index key attributes, which are automatically projected.
-	Projection *Projection `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LocalSecondaryIndexInfo) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LocalSecondaryIndexInfo) GoString() string {
-	return s.String()
-}
-
-// SetIndexName sets the IndexName field's value.
-func (s *LocalSecondaryIndexInfo) SetIndexName(v string) *LocalSecondaryIndexInfo {
-	s.IndexName = &v
-	return s
-}
-
-// SetKeySchema sets the KeySchema field's value.
-func (s *LocalSecondaryIndexInfo) SetKeySchema(v []*KeySchemaElement) *LocalSecondaryIndexInfo {
-	s.KeySchema = v
-	return s
-}
-
-// SetProjection sets the Projection field's value.
-func (s *LocalSecondaryIndexInfo) SetProjection(v *Projection) *LocalSecondaryIndexInfo {
-	s.Projection = v
-	return s
-}
-
-// Sets the maximum number of read and write units for the specified on-demand
-// table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits,
-// or both.
-type OnDemandThroughput struct {
-	_ struct{} `type:"structure"`
-
-	// Maximum number of read request units for the specified table.
-	//
-	// To specify a maximum OnDemandThroughput on your table, set the value of MaxReadRequestUnits
-	// as greater than or equal to 1. To remove the maximum OnDemandThroughput that
-	// is currently set on your table, set the value of MaxReadRequestUnits to -1.
-	MaxReadRequestUnits *int64 `type:"long"`
-
-	// Maximum number of write request units for the specified table.
-	//
-	// To specify a maximum OnDemandThroughput on your table, set the value of MaxWriteRequestUnits
-	// as greater than or equal to 1. To remove the maximum OnDemandThroughput that
-	// is currently set on your table, set the value of MaxWriteRequestUnits to
-	// -1.
-	MaxWriteRequestUnits *int64 `type:"long"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s OnDemandThroughput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s OnDemandThroughput) GoString() string {
-	return s.String()
-}
-
-// SetMaxReadRequestUnits sets the MaxReadRequestUnits field's value.
-func (s *OnDemandThroughput) SetMaxReadRequestUnits(v int64) *OnDemandThroughput {
-	s.MaxReadRequestUnits = &v
-	return s
-}
-
-// SetMaxWriteRequestUnits sets the MaxWriteRequestUnits field's value.
-func (s *OnDemandThroughput) SetMaxWriteRequestUnits(v int64) *OnDemandThroughput {
-	s.MaxWriteRequestUnits = &v
-	return s
-}
-
-// Overrides the on-demand throughput settings for this replica table. If you
-// don't specify a value for this parameter, it uses the source table's on-demand
-// throughput settings.
-type OnDemandThroughputOverride struct {
-	_ struct{} `type:"structure"`
-
-	// Maximum number of read request units for the specified replica table.
-	MaxReadRequestUnits *int64 `type:"long"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s OnDemandThroughputOverride) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s OnDemandThroughputOverride) GoString() string {
-	return s.String()
-}
-
-// SetMaxReadRequestUnits sets the MaxReadRequestUnits field's value.
-func (s *OnDemandThroughputOverride) SetMaxReadRequestUnits(v int64) *OnDemandThroughputOverride {
-	s.MaxReadRequestUnits = &v
-	return s
-}
-
-// Represents a PartiQL statement that uses parameters.
-type ParameterizedStatement struct {
-	_ struct{} `type:"structure"`
-
-	// The parameter values.
-	Parameters []*AttributeValue `min:"1" type:"list"`
-
-	// An optional parameter that returns the item attributes for a PartiQL ParameterizedStatement
-	// operation that failed a condition check.
-	//
-	// There is no additional cost associated with requesting a return value aside
-	// from the small network and processing overhead of receiving a larger response.
-	// No read capacity units are consumed.
-	ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"`
-
-	// A PartiQL statement that uses parameters.
-	//
-	// Statement is a required field
-	Statement *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ParameterizedStatement) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ParameterizedStatement) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ParameterizedStatement) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "ParameterizedStatement"}
-	if s.Parameters != nil && len(s.Parameters) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Parameters", 1))
-	}
-	if s.Statement == nil {
-		invalidParams.Add(request.NewErrParamRequired("Statement"))
-	}
-	if s.Statement != nil && len(*s.Statement) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Statement", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetParameters sets the Parameters field's value.
-func (s *ParameterizedStatement) SetParameters(v []*AttributeValue) *ParameterizedStatement {
-	s.Parameters = v
-	return s
-}
-
-// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value.
-func (s *ParameterizedStatement) SetReturnValuesOnConditionCheckFailure(v string) *ParameterizedStatement {
-	s.ReturnValuesOnConditionCheckFailure = &v
-	return s
-}
-
-// SetStatement sets the Statement field's value.
-func (s *ParameterizedStatement) SetStatement(v string) *ParameterizedStatement {
-	s.Statement = &v
-	return s
-}
-
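A sketch of the ParameterizedStatement type above as it is consumed by ExecuteTransaction; the table, statement, and parameter values are hypothetical, and the ? placeholders are bound in order from Parameters:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))
	_, err := svc.ExecuteTransaction(&dynamodb.ExecuteTransactionInput{
		TransactStatements: []*dynamodb.ParameterizedStatement{{
			// Hypothetical table and PartiQL statement.
			Statement: aws.String(`UPDATE "Music" SET AwardsWon=? WHERE Artist=? AND SongTitle=?`),
			Parameters: []*dynamodb.AttributeValue{
				{N: aws.String("1")},
				{S: aws.String("No One You Know")},
				{S: aws.String("Call Me Today")},
			},
		}},
	})
	if err != nil {
		fmt.Println(err)
	}
}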
-// The description of the point in time settings applied to the table.
-type PointInTimeRecoveryDescription struct {
-	_ struct{} `type:"structure"`
-
-	// Specifies the earliest point in time you can restore your table to. You can
-	// restore your table to any point in time during the last 35 days.
-	EarliestRestorableDateTime *time.Time `type:"timestamp"`
-
-	// LatestRestorableDateTime is typically 5 minutes before the current time.
-	LatestRestorableDateTime *time.Time `type:"timestamp"`
-
-	// The current state of point in time recovery:
-	//
-	// * ENABLED - Point in time recovery is enabled.
-	//
-	// * DISABLED - Point in time recovery is disabled.
-	PointInTimeRecoveryStatus *string `type:"string" enum:"PointInTimeRecoveryStatus"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PointInTimeRecoveryDescription) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PointInTimeRecoveryDescription) GoString() string {
-	return s.String()
-}
-
-// SetEarliestRestorableDateTime sets the EarliestRestorableDateTime field's value.
-func (s *PointInTimeRecoveryDescription) SetEarliestRestorableDateTime(v time.Time) *PointInTimeRecoveryDescription {
-	s.EarliestRestorableDateTime = &v
-	return s
-}
-
-// SetLatestRestorableDateTime sets the LatestRestorableDateTime field's value.
-func (s *PointInTimeRecoveryDescription) SetLatestRestorableDateTime(v time.Time) *PointInTimeRecoveryDescription {
-	s.LatestRestorableDateTime = &v
-	return s
-}
-
-// SetPointInTimeRecoveryStatus sets the PointInTimeRecoveryStatus field's value.
-func (s *PointInTimeRecoveryDescription) SetPointInTimeRecoveryStatus(v string) *PointInTimeRecoveryDescription {
-	s.PointInTimeRecoveryStatus = &v
-	return s
-}
-
-// Represents the settings used to enable point in time recovery.
-type PointInTimeRecoverySpecification struct {
-	_ struct{} `type:"structure"`
-
-	// Indicates whether point in time recovery is enabled (true) or disabled (false)
-	// on the table.
-	//
-	// PointInTimeRecoveryEnabled is a required field
-	PointInTimeRecoveryEnabled *bool `type:"boolean" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PointInTimeRecoverySpecification) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PointInTimeRecoverySpecification) GoString() string {
-	return s.String()
-}
-
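Enabling point in time recovery comes down to the one required field documented above. A minimal sketch, with a hypothetical table name:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))
	_, err := svc.UpdateContinuousBackups(&dynamodb.UpdateContinuousBackupsInput{
		TableName: aws.String("Music"), // hypothetical
		PointInTimeRecoverySpecification: &dynamodb.PointInTimeRecoverySpecification{
			PointInTimeRecoveryEnabled: aws.Bool(true),
		},
	})
	if err != nil {
		fmt.Println(err)
	}
}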
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *PointInTimeRecoverySpecification) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "PointInTimeRecoverySpecification"}
-	if s.PointInTimeRecoveryEnabled == nil {
-		invalidParams.Add(request.NewErrParamRequired("PointInTimeRecoveryEnabled"))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetPointInTimeRecoveryEnabled sets the PointInTimeRecoveryEnabled field's value.
-func (s *PointInTimeRecoverySpecification) SetPointInTimeRecoveryEnabled(v bool) *PointInTimeRecoverySpecification {
-	s.PointInTimeRecoveryEnabled = &v
-	return s
-}
-
-// Point in time recovery has not yet been enabled for this source table.
-type PointInTimeRecoveryUnavailableException struct {
-	_ struct{} `type:"structure"`
-	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
-	Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PointInTimeRecoveryUnavailableException) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PointInTimeRecoveryUnavailableException) GoString() string {
-	return s.String()
-}
-
-func newErrorPointInTimeRecoveryUnavailableException(v protocol.ResponseMetadata) error {
-	return &PointInTimeRecoveryUnavailableException{
-		RespMetadata: v,
-	}
-}
-
-// Code returns the exception type name.
-func (s *PointInTimeRecoveryUnavailableException) Code() string {
-	return "PointInTimeRecoveryUnavailableException"
-}
-
-// Message returns the exception's message.
-func (s *PointInTimeRecoveryUnavailableException) Message() string {
-	if s.Message_ != nil {
-		return *s.Message_
-	}
-	return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *PointInTimeRecoveryUnavailableException) OrigErr() error {
-	return nil
-}
-
-func (s *PointInTimeRecoveryUnavailableException) Error() string {
-	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *PointInTimeRecoveryUnavailableException) StatusCode() int {
-	return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *PointInTimeRecoveryUnavailableException) RequestID() string {
-	return s.RespMetadata.RequestID
-}
-
-// The operation tried to access a nonexistent resource-based policy.
-//
-// If you specified an ExpectedRevisionId, it's possible that a policy is present
-// for the resource but its revision ID didn't match the expected value.
-type PolicyNotFoundException struct {
-	_ struct{} `type:"structure"`
-	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
-	Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PolicyNotFoundException) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PolicyNotFoundException) GoString() string {
-	return s.String()
-}
-
-func newErrorPolicyNotFoundException(v protocol.ResponseMetadata) error {
-	return &PolicyNotFoundException{
-		RespMetadata: v,
-	}
-}
-
-// Code returns the exception type name.
-func (s *PolicyNotFoundException) Code() string {
-	return "PolicyNotFoundException"
-}
-
-// Message returns the exception's message.
-func (s *PolicyNotFoundException) Message() string {
-	if s.Message_ != nil {
-		return *s.Message_
-	}
-	return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *PolicyNotFoundException) OrigErr() error {
-	return nil
-}
-
-func (s *PolicyNotFoundException) Error() string {
-	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *PolicyNotFoundException) StatusCode() int {
-	return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *PolicyNotFoundException) RequestID() string {
-	return s.RespMetadata.RequestID
-}
-
-// Represents attributes that are copied (projected) from the table into an
-// index. These are in addition to the primary key attributes and index key
-// attributes, which are automatically projected.
-type Projection struct {
-	_ struct{} `type:"structure"`
-
-	// Represents the non-key attribute names which will be projected into the index.
-	//
-	// For local secondary indexes, the total count of NonKeyAttributes summed across
-	// all of the local secondary indexes, must not exceed 100. If you project the
-	// same attribute into two different indexes, this counts as two distinct attributes
-	// when determining the total.
-	NonKeyAttributes []*string `min:"1" type:"list"`
-
-	// The set of attributes that are projected into the index:
-	//
-	// * KEYS_ONLY - Only the index and primary keys are projected into the index.
-	//
-	// * INCLUDE - In addition to the attributes described in KEYS_ONLY, the
-	// secondary index will include other non-key attributes that you specify.
-	//
-	// * ALL - All of the table attributes are projected into the index.
-	//
-	// When using the DynamoDB console, ALL is selected by default.
-	ProjectionType *string `type:"string" enum:"ProjectionType"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Projection) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Projection) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *Projection) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "Projection"}
-	if s.NonKeyAttributes != nil && len(s.NonKeyAttributes) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("NonKeyAttributes", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetNonKeyAttributes sets the NonKeyAttributes field's value.
-func (s *Projection) SetNonKeyAttributes(v []*string) *Projection {
-	s.NonKeyAttributes = v
-	return s
-}
-
-// SetProjectionType sets the ProjectionType field's value.
-func (s *Projection) SetProjectionType(v string) *Projection {
-	s.ProjectionType = &v
-	return s
-}
-
-// Represents the provisioned throughput settings for a specified table or index.
-// The settings can be modified using the UpdateTable operation.
-//
-// For current minimum and maximum provisioned throughput values, see Service,
-// Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
-// in the Amazon DynamoDB Developer Guide.
-type ProvisionedThroughput struct {
-	_ struct{} `type:"structure"`
-
-	// The maximum number of strongly consistent reads consumed per second before
-	// DynamoDB returns a ThrottlingException. For more information, see Specifying
-	// Read and Write Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html)
-	// in the Amazon DynamoDB Developer Guide.
-	//
-	// If read/write capacity mode is PAY_PER_REQUEST, the value is set to 0.
-	//
-	// ReadCapacityUnits is a required field
-	ReadCapacityUnits *int64 `min:"1" type:"long" required:"true"`
-
-	// The maximum number of writes consumed per second before DynamoDB returns
-	// a ThrottlingException. For more information, see Specifying Read and Write
-	// Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html)
-	// in the Amazon DynamoDB Developer Guide.
-	//
-	// If read/write capacity mode is PAY_PER_REQUEST, the value is set to 0.
-	//
-	// WriteCapacityUnits is a required field
-	WriteCapacityUnits *int64 `min:"1" type:"long" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ProvisionedThroughput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ProvisionedThroughput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ProvisionedThroughput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ProvisionedThroughput"} - if s.ReadCapacityUnits == nil { - invalidParams.Add(request.NewErrParamRequired("ReadCapacityUnits")) - } - if s.ReadCapacityUnits != nil && *s.ReadCapacityUnits < 1 { - invalidParams.Add(request.NewErrParamMinValue("ReadCapacityUnits", 1)) - } - if s.WriteCapacityUnits == nil { - invalidParams.Add(request.NewErrParamRequired("WriteCapacityUnits")) - } - if s.WriteCapacityUnits != nil && *s.WriteCapacityUnits < 1 { - invalidParams.Add(request.NewErrParamMinValue("WriteCapacityUnits", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetReadCapacityUnits sets the ReadCapacityUnits field's value. -func (s *ProvisionedThroughput) SetReadCapacityUnits(v int64) *ProvisionedThroughput { - s.ReadCapacityUnits = &v - return s -} - -// SetWriteCapacityUnits sets the WriteCapacityUnits field's value. -func (s *ProvisionedThroughput) SetWriteCapacityUnits(v int64) *ProvisionedThroughput { - s.WriteCapacityUnits = &v - return s -} - -// Represents the provisioned throughput settings for the table, consisting -// of read and write capacity units, along with data about increases and decreases. -type ProvisionedThroughputDescription struct { - _ struct{} `type:"structure"` - - // The date and time of the last provisioned throughput decrease for this table. - LastDecreaseDateTime *time.Time `type:"timestamp"` - - // The date and time of the last provisioned throughput increase for this table. - LastIncreaseDateTime *time.Time `type:"timestamp"` - - // The number of provisioned throughput decreases for this table during this - // UTC calendar day. For current maximums on provisioned throughput decreases, - // see Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) - // in the Amazon DynamoDB Developer Guide. - NumberOfDecreasesToday *int64 `min:"1" type:"long"` - - // The maximum number of strongly consistent reads consumed per second before - // DynamoDB returns a ThrottlingException. Eventually consistent reads require - // less effort than strongly consistent reads, so a setting of 50 ReadCapacityUnits - // per second provides 100 eventually consistent ReadCapacityUnits per second. - ReadCapacityUnits *int64 `type:"long"` - - // The maximum number of writes consumed per second before DynamoDB returns - // a ThrottlingException. - WriteCapacityUnits *int64 `type:"long"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ProvisionedThroughputDescription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ProvisionedThroughputDescription) GoString() string { - return s.String() -} - -// SetLastDecreaseDateTime sets the LastDecreaseDateTime field's value. -func (s *ProvisionedThroughputDescription) SetLastDecreaseDateTime(v time.Time) *ProvisionedThroughputDescription { - s.LastDecreaseDateTime = &v - return s -} - -// SetLastIncreaseDateTime sets the LastIncreaseDateTime field's value. 
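Projection and ProvisionedThroughput, both removed above, typically meet in a global secondary index definition. A sketch, with index, key, and attribute names invented for illustration:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	gsi := &dynamodb.GlobalSecondaryIndex{
		IndexName: aws.String("GenreIndex"),
		KeySchema: []*dynamodb.KeySchemaElement{
			{AttributeName: aws.String("Genre"), KeyType: aws.String(dynamodb.KeyTypeHash)},
		},
		// INCLUDE projects the keys plus the listed non-key attributes.
		Projection: &dynamodb.Projection{
			ProjectionType:   aws.String(dynamodb.ProjectionTypeInclude),
			NonKeyAttributes: []*string{aws.String("AlbumTitle")},
		},
		// Ignored if the table uses PAY_PER_REQUEST billing.
		ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
			ReadCapacityUnits:  aws.Int64(5),
			WriteCapacityUnits: aws.Int64(5),
		},
	}
	// Runs the same client-side required/min checks shown above.
	fmt.Println(gsi.Validate())
}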
-func (s *ProvisionedThroughputDescription) SetLastIncreaseDateTime(v time.Time) *ProvisionedThroughputDescription { - s.LastIncreaseDateTime = &v - return s -} - -// SetNumberOfDecreasesToday sets the NumberOfDecreasesToday field's value. -func (s *ProvisionedThroughputDescription) SetNumberOfDecreasesToday(v int64) *ProvisionedThroughputDescription { - s.NumberOfDecreasesToday = &v - return s -} - -// SetReadCapacityUnits sets the ReadCapacityUnits field's value. -func (s *ProvisionedThroughputDescription) SetReadCapacityUnits(v int64) *ProvisionedThroughputDescription { - s.ReadCapacityUnits = &v - return s -} - -// SetWriteCapacityUnits sets the WriteCapacityUnits field's value. -func (s *ProvisionedThroughputDescription) SetWriteCapacityUnits(v int64) *ProvisionedThroughputDescription { - s.WriteCapacityUnits = &v - return s -} - -// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB -// automatically retry requests that receive this exception. Your request is -// eventually successful, unless your retry queue is too large to finish. Reduce -// the frequency of requests and use exponential backoff. For more information, -// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) -// in the Amazon DynamoDB Developer Guide. -type ProvisionedThroughputExceededException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - // You exceeded your maximum allowed provisioned throughput. - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ProvisionedThroughputExceededException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ProvisionedThroughputExceededException) GoString() string { - return s.String() -} - -func newErrorProvisionedThroughputExceededException(v protocol.ResponseMetadata) error { - return &ProvisionedThroughputExceededException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *ProvisionedThroughputExceededException) Code() string { - return "ProvisionedThroughputExceededException" -} - -// Message returns the exception's message. -func (s *ProvisionedThroughputExceededException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *ProvisionedThroughputExceededException) OrigErr() error { - return nil -} - -func (s *ProvisionedThroughputExceededException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *ProvisionedThroughputExceededException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. 
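The ProvisionedThroughputExceededException deleted below is surfaced through the v1 SDK's awserr machinery; a sketch of detecting it after the SDK's built-in retries are exhausted (table and key are hypothetical):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	_, err := svc.GetItem(&dynamodb.GetItemInput{
		TableName: aws.String("Music"),
		Key: map[string]*dynamodb.AttributeValue{
			"Artist": {S: aws.String("No One You Know")},
		},
	})
	if aerr, ok := err.(awserr.Error); ok &&
		aerr.Code() == dynamodb.ErrCodeProvisionedThroughputExceededException {
		// The SDK has already retried with exponential backoff by the time
		// this surfaces, so treat it as a signal to shed load.
		fmt.Println("throttled:", aerr.Message())
	}
}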
-func (s *ProvisionedThroughputExceededException) RequestID() string { - return s.RespMetadata.RequestID -} - -// Replica-specific provisioned throughput settings. If not specified, uses -// the source table's provisioned throughput settings. -type ProvisionedThroughputOverride struct { - _ struct{} `type:"structure"` - - // Replica-specific read capacity units. If not specified, uses the source table's - // read capacity settings. - ReadCapacityUnits *int64 `min:"1" type:"long"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ProvisionedThroughputOverride) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ProvisionedThroughputOverride) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ProvisionedThroughputOverride) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ProvisionedThroughputOverride"} - if s.ReadCapacityUnits != nil && *s.ReadCapacityUnits < 1 { - invalidParams.Add(request.NewErrParamMinValue("ReadCapacityUnits", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetReadCapacityUnits sets the ReadCapacityUnits field's value. -func (s *ProvisionedThroughputOverride) SetReadCapacityUnits(v int64) *ProvisionedThroughputOverride { - s.ReadCapacityUnits = &v - return s -} - -// Represents a request to perform a PutItem operation. -type Put struct { - _ struct{} `type:"structure"` - - // A condition that must be satisfied in order for a conditional update to succeed. - ConditionExpression *string `type:"string"` - - // One or more substitution tokens for attribute names in an expression. - ExpressionAttributeNames map[string]*string `type:"map"` - - // One or more values that can be substituted in an expression. - ExpressionAttributeValues map[string]*AttributeValue `type:"map"` - - // A map of attribute name to attribute values, representing the primary key - // of the item to be written by PutItem. All of the table's primary key attributes - // must be specified, and their data types must match those of the table's key - // schema. If any attributes are present in the item that are part of an index - // key schema for the table, their types must match the index key schema. - // - // Item is a required field - Item map[string]*AttributeValue `type:"map" required:"true"` - - // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the - // Put condition fails. For ReturnValuesOnConditionCheckFailure, the valid values - // are: NONE and ALL_OLD. - ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"` - - // Name of the table in which to write the item. You can also provide the Amazon - // Resource Name (ARN) of the table in this parameter. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. 
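The Put type above is the transactional flavor of a write: it only travels inside TransactWriteItems. A sketch under the same hypothetical table name:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	_, err := svc.TransactWriteItems(&dynamodb.TransactWriteItemsInput{
		TransactItems: []*dynamodb.TransactWriteItem{{
			Put: &dynamodb.Put{
				TableName: aws.String("Music"),
				Item: map[string]*dynamodb.AttributeValue{
					"Artist":    {S: aws.String("No One You Know")},
					"SongTitle": {S: aws.String("Call Me Today")},
				},
				// Abort the whole transaction if the item already exists,
				// and return the clashing item on failure.
				ConditionExpression: aws.String("attribute_not_exists(Artist)"),
				ReturnValuesOnConditionCheckFailure: aws.String(
					dynamodb.ReturnValuesOnConditionCheckFailureAllOld),
			},
		}},
	})
	fmt.Println(err)
}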
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Put) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Put) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Put) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Put"} - if s.Item == nil { - invalidParams.Add(request.NewErrParamRequired("Item")) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetConditionExpression sets the ConditionExpression field's value. -func (s *Put) SetConditionExpression(v string) *Put { - s.ConditionExpression = &v - return s -} - -// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. -func (s *Put) SetExpressionAttributeNames(v map[string]*string) *Put { - s.ExpressionAttributeNames = v - return s -} - -// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. -func (s *Put) SetExpressionAttributeValues(v map[string]*AttributeValue) *Put { - s.ExpressionAttributeValues = v - return s -} - -// SetItem sets the Item field's value. -func (s *Put) SetItem(v map[string]*AttributeValue) *Put { - s.Item = v - return s -} - -// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. -func (s *Put) SetReturnValuesOnConditionCheckFailure(v string) *Put { - s.ReturnValuesOnConditionCheckFailure = &v - return s -} - -// SetTableName sets the TableName field's value. -func (s *Put) SetTableName(v string) *Put { - s.TableName = &v - return s -} - -// Represents the input of a PutItem operation. -type PutItemInput struct { - _ struct{} `type:"structure"` - - // A condition that must be satisfied in order for a conditional PutItem operation - // to succeed. - // - // An expression can contain any of the following: - // - // * Functions: attribute_exists | attribute_not_exists | attribute_type - // | contains | begins_with | size These function names are case-sensitive. - // - // * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN - // - // * Logical operators: AND | OR | NOT - // - // For more information on condition expressions, see Condition Expressions - // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) - // in the Amazon DynamoDB Developer Guide. - ConditionExpression *string `type:"string"` - - // This is a legacy parameter. Use ConditionExpression instead. For more information, - // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) - // in the Amazon DynamoDB Developer Guide. - ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` - - // This is a legacy parameter. Use ConditionExpression instead. 
For more information,
- // see Expected (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html)
- // in the Amazon DynamoDB Developer Guide.
- Expected map[string]*ExpectedAttributeValue `type:"map"`
-
- // One or more substitution tokens for attribute names in an expression. The
- // following are some use cases for using ExpressionAttributeNames:
- //
- // * To access an attribute whose name conflicts with a DynamoDB reserved
- // word.
- //
- // * To create a placeholder for repeating occurrences of an attribute name
- // in an expression.
- //
- // * To prevent special characters in an attribute name from being misinterpreted
- // in an expression.
- //
- // Use the # character in an expression to dereference an attribute name. For
- // example, consider the following attribute name:
- //
- // * Percentile
- //
- // The name of this attribute conflicts with a reserved word, so it cannot be
- // used directly in an expression. (For the complete list of reserved words,
- // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
- // in the Amazon DynamoDB Developer Guide). To work around this, you could specify
- // the following for ExpressionAttributeNames:
- //
- // * {"#P":"Percentile"}
- //
- // You could then use this substitution in an expression, as in this example:
- //
- // * #P = :val
- //
- // Tokens that begin with the : character are expression attribute values, which
- // are placeholders for the actual value at runtime.
- //
- // For more information on expression attribute names, see Specifying Item Attributes
- // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
- // in the Amazon DynamoDB Developer Guide.
- ExpressionAttributeNames map[string]*string `type:"map"`
-
- // One or more values that can be substituted in an expression.
- //
- // Use the : (colon) character in an expression to dereference an attribute
- // value. For example, suppose that you wanted to check whether the value of
- // the ProductStatus attribute was one of the following:
- //
- // Available | Backordered | Discontinued
- //
- // You would first need to specify ExpressionAttributeValues as follows:
- //
- // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"}
- // }
- //
- // You could then use these values in an expression, such as this:
- //
- // ProductStatus IN (:avail, :back, :disc)
- //
- // For more information on expression attribute values, see Condition Expressions
- // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
- // in the Amazon DynamoDB Developer Guide.
- ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
-
- // A map of attribute name/value pairs, one for each attribute. Only the primary
- // key attributes are required; you can optionally provide other attribute name-value
- // pairs for the item.
- //
- // You must provide all of the attributes for the primary key. For example,
- // with a simple primary key, you only need to provide a value for the partition
- // key. For a composite primary key, you must provide values for both the
- // partition key and the sort key.
- //
- // If you specify any attributes that are part of an index key, then the data
- // types for those attributes must match those of the schema in the table's
- // attribute definition.
- //
- // Empty String and Binary attribute values are allowed. Attribute values of
- // type String and Binary must have a length greater than zero if the attribute
- // is used as a key attribute for a table or index.
- //
- // For more information about primary keys, see Primary Key (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.CoreComponents.html#HowItWorks.CoreComponents.PrimaryKey)
- // in the Amazon DynamoDB Developer Guide.
- //
- // Each element in the Item map is an AttributeValue object.
- //
- // Item is a required field
- Item map[string]*AttributeValue `type:"map" required:"true"`
-
- // Determines the level of detail about either provisioned or on-demand throughput
- // consumption that is returned in the response:
- //
- // * INDEXES - The response includes the aggregate ConsumedCapacity for the
- // operation, together with ConsumedCapacity for each table and secondary
- // index that was accessed. Note that some operations, such as GetItem and
- // BatchGetItem, do not access any indexes at all. In these cases, specifying
- // INDEXES will only return ConsumedCapacity information for table(s).
- //
- // * TOTAL - The response includes only the aggregate ConsumedCapacity for
- // the operation.
- //
- // * NONE - No ConsumedCapacity details are included in the response.
- ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
-
- // Determines whether item collection metrics are returned. If set to SIZE,
- // the response includes statistics about item collections, if any, that were
- // modified during the operation. If set to NONE (the default), no statistics
- // are returned.
- ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"`
-
- // Use ReturnValues if you want to get the item attributes as they appeared
- // before they were updated with the PutItem request. For PutItem, the valid
- // values are:
- //
- // * NONE - If ReturnValues is not specified, or if its value is NONE, then
- // nothing is returned. (This setting is the default for ReturnValues.)
- //
- // * ALL_OLD - If PutItem overwrote an attribute name-value pair, then the
- // content of the old item is returned.
- //
- // The values returned are strongly consistent.
- //
- // There is no additional cost associated with requesting a return value aside
- // from the small network and processing overhead of receiving a larger response.
- // No read capacity units are consumed.
- //
- // The ReturnValues parameter is used by several DynamoDB operations; however,
- // PutItem does not recognize any values other than NONE or ALL_OLD.
- ReturnValues *string `type:"string" enum:"ReturnValue"`
-
- // An optional parameter that returns the item attributes for a PutItem operation
- // that failed a condition check.
- //
- // There is no additional cost associated with requesting a return value aside
- // from the small network and processing overhead of receiving a larger response.
- // No read capacity units are consumed.
- ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"`
-
- // The name of the table to contain the item. You can also provide the Amazon
- // Resource Name (ARN) of the table in this parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
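Pulling the PutItemInput pieces above together, a sketch of a conditional PutItem that uses a #-name and :-value placeholder in the style of the doc comments; the table, attributes, and placeholder names are illustrative.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.PutItem(&dynamodb.PutItemInput{
		TableName: aws.String("ProductCatalog"),
		Item: map[string]*dynamodb.AttributeValue{
			"Id":            {N: aws.String("201")},
			"ProductStatus": {S: aws.String("Available")},
		},
		// Overwrite only brand-new items or ones marked discontinued.
		ConditionExpression:      aws.String("attribute_not_exists(Id) OR #P = :disc"),
		ExpressionAttributeNames: map[string]*string{"#P": aws.String("ProductStatus")},
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
			":disc": {S: aws.String("Discontinued")},
		},
		// ALL_OLD returns the overwritten item, consuming no extra reads.
		ReturnValues: aws.String(dynamodb.ReturnValueAllOld),
	})
	fmt.Println(out, err)
}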
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutItemInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutItemInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutItemInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutItemInput"} - if s.Item == nil { - invalidParams.Add(request.NewErrParamRequired("Item")) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetConditionExpression sets the ConditionExpression field's value. -func (s *PutItemInput) SetConditionExpression(v string) *PutItemInput { - s.ConditionExpression = &v - return s -} - -// SetConditionalOperator sets the ConditionalOperator field's value. -func (s *PutItemInput) SetConditionalOperator(v string) *PutItemInput { - s.ConditionalOperator = &v - return s -} - -// SetExpected sets the Expected field's value. -func (s *PutItemInput) SetExpected(v map[string]*ExpectedAttributeValue) *PutItemInput { - s.Expected = v - return s -} - -// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. -func (s *PutItemInput) SetExpressionAttributeNames(v map[string]*string) *PutItemInput { - s.ExpressionAttributeNames = v - return s -} - -// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. -func (s *PutItemInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *PutItemInput { - s.ExpressionAttributeValues = v - return s -} - -// SetItem sets the Item field's value. -func (s *PutItemInput) SetItem(v map[string]*AttributeValue) *PutItemInput { - s.Item = v - return s -} - -// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. -func (s *PutItemInput) SetReturnConsumedCapacity(v string) *PutItemInput { - s.ReturnConsumedCapacity = &v - return s -} - -// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value. -func (s *PutItemInput) SetReturnItemCollectionMetrics(v string) *PutItemInput { - s.ReturnItemCollectionMetrics = &v - return s -} - -// SetReturnValues sets the ReturnValues field's value. -func (s *PutItemInput) SetReturnValues(v string) *PutItemInput { - s.ReturnValues = &v - return s -} - -// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. -func (s *PutItemInput) SetReturnValuesOnConditionCheckFailure(v string) *PutItemInput { - s.ReturnValuesOnConditionCheckFailure = &v - return s -} - -// SetTableName sets the TableName field's value. -func (s *PutItemInput) SetTableName(v string) *PutItemInput { - s.TableName = &v - return s -} - -// Represents the output of a PutItem operation. -type PutItemOutput struct { - _ struct{} `type:"structure"` - - // The attribute values as they appeared before the PutItem operation, but only - // if ReturnValues is specified as ALL_OLD in the request. 
Each element consists
- // of an attribute name and an attribute value.
- Attributes map[string]*AttributeValue `type:"map"`
-
- // The capacity units consumed by the PutItem operation. The data returned includes
- // the total provisioned throughput consumed, along with statistics for the
- // table and any indexes involved in the operation. ConsumedCapacity is only
- // returned if the ReturnConsumedCapacity parameter was specified. For more
- // information, see Capacity unit consumption for write operations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#write-operation-consumption)
- // in the Amazon DynamoDB Developer Guide.
- ConsumedCapacity *ConsumedCapacity `type:"structure"`
-
- // Information about item collections, if any, that were affected by the PutItem
- // operation. ItemCollectionMetrics is only returned if the ReturnItemCollectionMetrics
- // parameter was specified. If the table does not have any local secondary indexes,
- // this information is not returned in the response.
- //
- // Each ItemCollectionMetrics element consists of:
- //
- // * ItemCollectionKey - The partition key value of the item collection.
- // This is the same as the partition key value of the item itself.
- //
- // * SizeEstimateRangeGB - An estimate of item collection size, in gigabytes.
- // This value is a two-element array containing a lower bound and an upper
- // bound for the estimate. The estimate includes the size of all the items
- // in the table, plus the size of all attributes projected into all of the
- // local secondary indexes on that table. Use this estimate to measure whether
- // a local secondary index is approaching its size limit. The estimate is
- // subject to change over time; therefore, do not rely on the precision or
- // accuracy of the estimate.
- ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutItemOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutItemOutput) GoString() string {
- return s.String()
-}
-
-// SetAttributes sets the Attributes field's value.
-func (s *PutItemOutput) SetAttributes(v map[string]*AttributeValue) *PutItemOutput {
- s.Attributes = v
- return s
-}
-
-// SetConsumedCapacity sets the ConsumedCapacity field's value.
-func (s *PutItemOutput) SetConsumedCapacity(v *ConsumedCapacity) *PutItemOutput {
- s.ConsumedCapacity = v
- return s
-}
-
-// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value.
-func (s *PutItemOutput) SetItemCollectionMetrics(v *ItemCollectionMetrics) *PutItemOutput {
- s.ItemCollectionMetrics = v
- return s
-}
-
-// Represents a request to perform a PutItem operation on an item.
-type PutRequest struct {
- _ struct{} `type:"structure"`
-
- // A map of attribute name to attribute values, representing the primary key
- // of an item to be processed by PutItem. All of the table's primary key attributes
- // must be specified, and their data types must match those of the table's key
- // schema.
If any attributes are present in the item that are part of an index - // key schema for the table, their types must match the index key schema. - // - // Item is a required field - Item map[string]*AttributeValue `type:"map" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutRequest) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutRequest) GoString() string { - return s.String() -} - -// SetItem sets the Item field's value. -func (s *PutRequest) SetItem(v map[string]*AttributeValue) *PutRequest { - s.Item = v - return s -} - -type PutResourcePolicyInput struct { - _ struct{} `type:"structure"` - - // Set this parameter to true to confirm that you want to remove your permissions - // to change the policy of this resource in the future. - ConfirmRemoveSelfResourceAccess *bool `type:"boolean"` - - // A string value that you can use to conditionally update your policy. You - // can provide the revision ID of your existing policy to make mutating requests - // against that policy. - // - // When you provide an expected revision ID, if the revision ID of the existing - // policy on the resource doesn't match or if there's no policy attached to - // the resource, your request will be rejected with a PolicyNotFoundException. - // - // To conditionally attach a policy when no policy exists for the resource, - // specify NO_POLICY for the revision ID. - ExpectedRevisionId *string `min:"1" type:"string"` - - // An Amazon Web Services resource-based policy document in JSON format. - // - // * The maximum size supported for a resource-based policy document is 20 - // KB. DynamoDB counts whitespaces when calculating the size of a policy - // against this limit. - // - // * Within a resource-based policy, if the action for a DynamoDB service-linked - // role (SLR) to replicate data for a global table is denied, adding or deleting - // a replica will fail with an error. - // - // For a full list of all considerations that apply while attaching a resource-based - // policy, see Resource-based policy considerations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-considerations.html). - // - // Policy is a required field - Policy *string `type:"string" required:"true"` - - // The Amazon Resource Name (ARN) of the DynamoDB resource to which the policy - // will be attached. The resources you can specify include tables and streams. - // - // You can control index permissions using the base table's policy. To specify - // the same permission level for your table and its indexes, you can provide - // both the table and index Amazon Resource Name (ARN)s in the Resource field - // of a given Statement in your policy document. Alternatively, to specify different - // permissions for your table, indexes, or both, you can define multiple Statement - // fields in your policy document. - // - // ResourceArn is a required field - ResourceArn *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. 
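PutRequest, unlike PutItemInput, travels inside BatchWriteItem. A sketch, with a hypothetical table name and item:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.BatchWriteItem(&dynamodb.BatchWriteItemInput{
		RequestItems: map[string][]*dynamodb.WriteRequest{
			"Music": {
				{PutRequest: &dynamodb.PutRequest{
					Item: map[string]*dynamodb.AttributeValue{
						"Artist":    {S: aws.String("No One You Know")},
						"SongTitle": {S: aws.String("Call Me Today")},
					},
				}},
			},
		},
	})
	if err == nil && len(out.UnprocessedItems) > 0 {
		// Anything DynamoDB could not absorb must be resubmitted,
		// typically after an exponential backoff.
		fmt.Println("retry", len(out.UnprocessedItems), "table(s)")
	}
}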
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutResourcePolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutResourcePolicyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutResourcePolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutResourcePolicyInput"} - if s.ExpectedRevisionId != nil && len(*s.ExpectedRevisionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ExpectedRevisionId", 1)) - } - if s.Policy == nil { - invalidParams.Add(request.NewErrParamRequired("Policy")) - } - if s.ResourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceArn")) - } - if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetConfirmRemoveSelfResourceAccess sets the ConfirmRemoveSelfResourceAccess field's value. -func (s *PutResourcePolicyInput) SetConfirmRemoveSelfResourceAccess(v bool) *PutResourcePolicyInput { - s.ConfirmRemoveSelfResourceAccess = &v - return s -} - -// SetExpectedRevisionId sets the ExpectedRevisionId field's value. -func (s *PutResourcePolicyInput) SetExpectedRevisionId(v string) *PutResourcePolicyInput { - s.ExpectedRevisionId = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *PutResourcePolicyInput) SetPolicy(v string) *PutResourcePolicyInput { - s.Policy = &v - return s -} - -// SetResourceArn sets the ResourceArn field's value. -func (s *PutResourcePolicyInput) SetResourceArn(v string) *PutResourcePolicyInput { - s.ResourceArn = &v - return s -} - -type PutResourcePolicyOutput struct { - _ struct{} `type:"structure"` - - // A unique string that represents the revision ID of the policy. If you're - // comparing revision IDs, make sure to always use string comparison logic. - RevisionId *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutResourcePolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutResourcePolicyOutput) GoString() string { - return s.String() -} - -// SetRevisionId sets the RevisionId field's value. -func (s *PutResourcePolicyOutput) SetRevisionId(v string) *PutResourcePolicyOutput { - s.RevisionId = &v - return s -} - -// Represents the input of a Query operation. -type QueryInput struct { - _ struct{} `type:"structure"` - - // This is a legacy parameter. Use ProjectionExpression instead. 
For more information, - // see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html) - // in the Amazon DynamoDB Developer Guide. - AttributesToGet []*string `min:"1" type:"list"` - - // This is a legacy parameter. Use FilterExpression instead. For more information, - // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) - // in the Amazon DynamoDB Developer Guide. - ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` - - // Determines the read consistency model: If set to true, then the operation - // uses strongly consistent reads; otherwise, the operation uses eventually - // consistent reads. - // - // Strongly consistent reads are not supported on global secondary indexes. - // If you query a global secondary index with ConsistentRead set to true, you - // will receive a ValidationException. - ConsistentRead *bool `type:"boolean"` - - // The primary key of the first item that this operation will evaluate. Use - // the value that was returned for LastEvaluatedKey in the previous operation. - // - // The data type for ExclusiveStartKey must be String, Number, or Binary. No - // set data types are allowed. - ExclusiveStartKey map[string]*AttributeValue `type:"map"` - - // One or more substitution tokens for attribute names in an expression. The - // following are some use cases for using ExpressionAttributeNames: - // - // * To access an attribute whose name conflicts with a DynamoDB reserved - // word. - // - // * To create a placeholder for repeating occurrences of an attribute name - // in an expression. - // - // * To prevent special characters in an attribute name from being misinterpreted - // in an expression. - // - // Use the # character in an expression to dereference an attribute name. For - // example, consider the following attribute name: - // - // * Percentile - // - // The name of this attribute conflicts with a reserved word, so it cannot be - // used directly in an expression. (For the complete list of reserved words, - // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) - // in the Amazon DynamoDB Developer Guide). To work around this, you could specify - // the following for ExpressionAttributeNames: - // - // * {"#P":"Percentile"} - // - // You could then use this substitution in an expression, as in this example: - // - // * #P = :val - // - // Tokens that begin with the : character are expression attribute values, which - // are placeholders for the actual value at runtime. - // - // For more information on expression attribute names, see Specifying Item Attributes - // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. - ExpressionAttributeNames map[string]*string `type:"map"` - - // One or more values that can be substituted in an expression. - // - // Use the : (colon) character in an expression to dereference an attribute - // value. 
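The placeholder rules above mirror the documentation's own ProductStatus example; a sketch of a Scan that plugs in the :avail/:back/:disc values (the table name is hypothetical):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.Scan(&dynamodb.ScanInput{
		TableName:        aws.String("ProductCatalog"),
		FilterExpression: aws.String("ProductStatus IN (:avail, :back, :disc)"),
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
			":avail": {S: aws.String("Available")},
			":back":  {S: aws.String("Backordered")},
			":disc":  {S: aws.String("Discontinued")},
		},
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(len(out.Items), "matching items")
}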
For example, suppose that you wanted to check whether the value of
- // the ProductStatus attribute was one of the following:
- //
- // Available | Backordered | Discontinued
- //
- // You would first need to specify ExpressionAttributeValues as follows:
- //
- // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"}
- // }
- //
- // You could then use these values in an expression, such as this:
- //
- // ProductStatus IN (:avail, :back, :disc)
- //
- // For more information on expression attribute values, see Specifying Conditions
- // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
- // in the Amazon DynamoDB Developer Guide.
- ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
-
- // A string that contains conditions that DynamoDB applies after the Query operation,
- // but before the data is returned to you. Items that do not satisfy the FilterExpression
- // criteria are not returned.
- //
- // A FilterExpression does not allow key attributes. You cannot define a filter
- // expression based on a partition key or a sort key.
- //
- // A FilterExpression is applied after the items have already been read; the
- // process of filtering does not consume any additional read capacity units.
- //
- // For more information, see Filter Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.FilterExpression.html)
- // in the Amazon DynamoDB Developer Guide.
- FilterExpression *string `type:"string"`
-
- // The name of an index to query. This index can be any local secondary index
- // or global secondary index on the table. Note that if you use the IndexName
- // parameter, you must also provide TableName.
- IndexName *string `min:"3" type:"string"`
-
- // The condition that specifies the key values for items to be retrieved by
- // the Query action.
- //
- // The condition must perform an equality test on a single partition key value.
- //
- // The condition can optionally perform one of several comparison tests on a
- // single sort key value. This allows Query to retrieve one item with a given
- // partition key value and sort key value, or several items that have the same
- // partition key value but different sort key values.
- //
- // The partition key equality test is required, and must be specified in the
- // following format:
- //
- // partitionKeyName = :partitionkeyval
- //
- // If you also want to provide a condition for the sort key, it must be combined
- // using AND with the condition for the partition key. Following is an example, using
- // the = comparison operator for the sort key:
- //
- // partitionKeyName = :partitionkeyval AND sortKeyName = :sortkeyval
- //
- // Valid comparisons for the sort key condition are as follows:
- //
- // * sortKeyName = :sortkeyval - true if the sort key value is equal to :sortkeyval.
- //
- // * sortKeyName < :sortkeyval - true if the sort key value is less than
- // :sortkeyval.
- //
- // * sortKeyName <= :sortkeyval - true if the sort key value is less than
- // or equal to :sortkeyval.
- //
- // * sortKeyName > :sortkeyval - true if the sort key value is greater than
- // :sortkeyval.
- //
- // * sortKeyName >= :sortkeyval - true if the sort key value is greater than
- // or equal to :sortkeyval.
- //
- // * sortKeyName BETWEEN :sortkeyval1 AND :sortkeyval2 - true if the sort
- // key value is greater than or equal to :sortkeyval1, and less than or equal
- // to :sortkeyval2.
- //
- // * begins_with ( sortKeyName, :sortkeyval ) - true if the sort key value
- // begins with a particular operand. (You cannot use this function with a
- // sort key that is of type Number.) Note that the function name begins_with
- // is case-sensitive.
- //
- // Use the ExpressionAttributeValues parameter to replace tokens such as :partitionkeyval
- // and :sortkeyval with actual values at runtime.
- //
- // You can optionally use the ExpressionAttributeNames parameter to replace
- // the names of the partition key and sort key with placeholder tokens. This
- // option might be necessary if an attribute name conflicts with a DynamoDB
- // reserved word. For example, the following KeyConditionExpression parameter
- // causes an error because Size is a reserved word:
- //
- // * Size = :myval
- //
- // To work around this, define a placeholder (such as #S) to represent the attribute
- // name Size. KeyConditionExpression is then as follows:
- //
- // * #S = :myval
- //
- // For a list of reserved words, see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
- // in the Amazon DynamoDB Developer Guide.
- //
- // For more information on ExpressionAttributeNames and ExpressionAttributeValues,
- // see Using Placeholders for Attribute Names and Values (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ExpressionPlaceholders.html)
- // in the Amazon DynamoDB Developer Guide.
- KeyConditionExpression *string `type:"string"`
-
- // This is a legacy parameter. Use KeyConditionExpression instead. For more
- // information, see KeyConditions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.KeyConditions.html)
- // in the Amazon DynamoDB Developer Guide.
- KeyConditions map[string]*Condition `type:"map"`
-
- // The maximum number of items to evaluate (not necessarily the number of matching
- // items). If DynamoDB processes the number of items up to the limit while processing
- // the results, it stops the operation and returns the matching values up to
- // that point, and a key in LastEvaluatedKey to apply in a subsequent operation,
- // so that you can pick up where you left off. Also, if the processed dataset
- // size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation
- // and returns the matching values up to the limit, and a key in LastEvaluatedKey
- // to apply in a subsequent operation to continue the operation. For more information,
- // see Query and Scan (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html)
- // in the Amazon DynamoDB Developer Guide.
- Limit *int64 `min:"1" type:"integer"`
-
- // A string that identifies one or more attributes to retrieve from the table.
- // These attributes can include scalars, sets, or elements of a JSON document.
- // The attributes in the expression must be separated by commas.
- //
- // If no attribute names are specified, then all attributes will be returned.
- // If any of the requested attributes are not found, they will not appear in
- // the result.
- //
- // For more information, see Accessing Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
- // in the Amazon DynamoDB Developer Guide.
- ProjectionExpression *string `type:"string"`
-
- // This is a legacy parameter. Use FilterExpression instead.
For more information, - // see QueryFilter (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.QueryFilter.html) - // in the Amazon DynamoDB Developer Guide. - QueryFilter map[string]*Condition `type:"map"` - - // Determines the level of detail about either provisioned or on-demand throughput - // consumption that is returned in the response: - // - // * INDEXES - The response includes the aggregate ConsumedCapacity for the - // operation, together with ConsumedCapacity for each table and secondary - // index that was accessed. Note that some operations, such as GetItem and - // BatchGetItem, do not access any indexes at all. In these cases, specifying - // INDEXES will only return ConsumedCapacity information for table(s). - // - // * TOTAL - The response includes only the aggregate ConsumedCapacity for - // the operation. - // - // * NONE - No ConsumedCapacity details are included in the response. - ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` - - // Specifies the order for index traversal: If true (default), the traversal - // is performed in ascending order; if false, the traversal is performed in - // descending order. - // - // Items with the same partition key value are stored in sorted order by sort - // key. If the sort key data type is Number, the results are stored in numeric - // order. For type String, the results are stored in order of UTF-8 bytes. For - // type Binary, DynamoDB treats each byte of the binary data as unsigned. - // - // If ScanIndexForward is true, DynamoDB returns the results in the order in - // which they are stored (by sort key value). This is the default behavior. - // If ScanIndexForward is false, DynamoDB reads the results in reverse order - // by sort key value, and then returns the results to the client. - ScanIndexForward *bool `type:"boolean"` - - // The attributes to be returned in the result. You can retrieve all item attributes, - // specific item attributes, the count of matching items, or in the case of - // an index, some or all of the attributes projected into the index. - // - // * ALL_ATTRIBUTES - Returns all of the item attributes from the specified - // table or index. If you query a local secondary index, then for each matching - // item in the index, DynamoDB fetches the entire item from the parent table. - // If the index is configured to project all item attributes, then all of - // the data can be obtained from the local secondary index, and no fetching - // is required. - // - // * ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves - // all attributes that have been projected into the index. If the index is - // configured to project all attributes, this return value is equivalent - // to specifying ALL_ATTRIBUTES. - // - // * COUNT - Returns the number of matching items, rather than the matching - // items themselves. Note that this uses the same quantity of read capacity - // units as getting the items, and is subject to the same item size calculations. - // - // * SPECIFIC_ATTRIBUTES - Returns only the attributes listed in ProjectionExpression. - // This return value is equivalent to specifying ProjectionExpression without - // specifying any value for Select. If you query or scan a local secondary - // index and request only attributes that are projected into that index, - // the operation will read only the index and not the table. 
If any of the - // requested attributes are not projected into the local secondary index, - // DynamoDB fetches each of these attributes from the parent table. This - // extra fetching incurs additional throughput cost and latency. If you query - // or scan a global secondary index, you can only request attributes that - // are projected into the index. Global secondary index queries cannot fetch - // attributes from the parent table. - // - // If neither Select nor ProjectionExpression are specified, DynamoDB defaults - // to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when - // accessing an index. You cannot use both Select and ProjectionExpression together - // in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. - // (This usage is equivalent to specifying ProjectionExpression without any - // value for Select.) - // - // If you use the ProjectionExpression parameter, then the value for Select - // can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an - // error. - Select *string `type:"string" enum:"Select"` - - // The name of the table containing the requested items. You can also provide - // the Amazon Resource Name (ARN) of the table in this parameter. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s QueryInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s QueryInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *QueryInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "QueryInput"} - if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AttributesToGet", 1)) - } - if s.IndexName != nil && len(*s.IndexName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) - } - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - if s.KeyConditions != nil { - for i, v := range s.KeyConditions { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeyConditions", i), err.(request.ErrInvalidParams)) - } - } - } - if s.QueryFilter != nil { - for i, v := range s.QueryFilter { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueryFilter", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAttributesToGet sets the AttributesToGet field's value. 
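With the QueryInput fields and validation now fully shown, a compact sketch of the common path: a key condition plus ExpressionAttributeValues, Limit, and ScanIndexForward. Table, key names, and values are hypothetical.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.Query(&dynamodb.QueryInput{
		TableName:              aws.String("Music"),
		KeyConditionExpression: aws.String("Artist = :a AND begins_with(SongTitle, :t)"),
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
			":a": {S: aws.String("No One You Know")},
			":t": {S: aws.String("Call")},
		},
		ScanIndexForward: aws.Bool(false), // descending sort-key order
		Limit:            aws.Int64(10),   // items evaluated, not items matched
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(aws.Int64Value(out.Count), "items in this page")
}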
-func (s *QueryInput) SetAttributesToGet(v []*string) *QueryInput { - s.AttributesToGet = v - return s -} - -// SetConditionalOperator sets the ConditionalOperator field's value. -func (s *QueryInput) SetConditionalOperator(v string) *QueryInput { - s.ConditionalOperator = &v - return s -} - -// SetConsistentRead sets the ConsistentRead field's value. -func (s *QueryInput) SetConsistentRead(v bool) *QueryInput { - s.ConsistentRead = &v - return s -} - -// SetExclusiveStartKey sets the ExclusiveStartKey field's value. -func (s *QueryInput) SetExclusiveStartKey(v map[string]*AttributeValue) *QueryInput { - s.ExclusiveStartKey = v - return s -} - -// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. -func (s *QueryInput) SetExpressionAttributeNames(v map[string]*string) *QueryInput { - s.ExpressionAttributeNames = v - return s -} - -// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. -func (s *QueryInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *QueryInput { - s.ExpressionAttributeValues = v - return s -} - -// SetFilterExpression sets the FilterExpression field's value. -func (s *QueryInput) SetFilterExpression(v string) *QueryInput { - s.FilterExpression = &v - return s -} - -// SetIndexName sets the IndexName field's value. -func (s *QueryInput) SetIndexName(v string) *QueryInput { - s.IndexName = &v - return s -} - -// SetKeyConditionExpression sets the KeyConditionExpression field's value. -func (s *QueryInput) SetKeyConditionExpression(v string) *QueryInput { - s.KeyConditionExpression = &v - return s -} - -// SetKeyConditions sets the KeyConditions field's value. -func (s *QueryInput) SetKeyConditions(v map[string]*Condition) *QueryInput { - s.KeyConditions = v - return s -} - -// SetLimit sets the Limit field's value. -func (s *QueryInput) SetLimit(v int64) *QueryInput { - s.Limit = &v - return s -} - -// SetProjectionExpression sets the ProjectionExpression field's value. -func (s *QueryInput) SetProjectionExpression(v string) *QueryInput { - s.ProjectionExpression = &v - return s -} - -// SetQueryFilter sets the QueryFilter field's value. -func (s *QueryInput) SetQueryFilter(v map[string]*Condition) *QueryInput { - s.QueryFilter = v - return s -} - -// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. -func (s *QueryInput) SetReturnConsumedCapacity(v string) *QueryInput { - s.ReturnConsumedCapacity = &v - return s -} - -// SetScanIndexForward sets the ScanIndexForward field's value. -func (s *QueryInput) SetScanIndexForward(v bool) *QueryInput { - s.ScanIndexForward = &v - return s -} - -// SetSelect sets the Select field's value. -func (s *QueryInput) SetSelect(v string) *QueryInput { - s.Select = &v - return s -} - -// SetTableName sets the TableName field's value. -func (s *QueryInput) SetTableName(v string) *QueryInput { - s.TableName = &v - return s -} - -// Represents the output of a Query operation. -type QueryOutput struct { - _ struct{} `type:"structure"` - - // The capacity units consumed by the Query operation. The data returned includes - // the total provisioned throughput consumed, along with statistics for the - // table and any indexes involved in the operation. ConsumedCapacity is only - // returned if the ReturnConsumedCapacity parameter was specified. 
For more - // information, see Capacity unit consumption for read operations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#read-operation-consumption) - // in the Amazon DynamoDB Developer Guide. - ConsumedCapacity *ConsumedCapacity `type:"structure"` - - // The number of items in the response. - // - // If you used a QueryFilter in the request, then Count is the number of items - // returned after the filter was applied, and ScannedCount is the number of - // matching items before the filter was applied. - // - // If you did not use a filter in the request, then Count and ScannedCount are - // the same. - Count *int64 `type:"integer"` - - // An array of item attributes that match the query criteria. Each element in - // this array consists of an attribute name and the value for that attribute. - Items []map[string]*AttributeValue `type:"list"` - - // The primary key of the item where the operation stopped, inclusive of the - // previous result set. Use this value to start a new operation, excluding this - // value in the new request. - // - // If LastEvaluatedKey is empty, then the "last page" of results has been processed - // and there is no more data to be retrieved. - // - // If LastEvaluatedKey is not empty, it does not necessarily mean that there - // is more data in the result set. The only way to know when you have reached - // the end of the result set is when LastEvaluatedKey is empty. - LastEvaluatedKey map[string]*AttributeValue `type:"map"` - - // The number of items evaluated, before any QueryFilter is applied. A high - // ScannedCount value with few, or no, Count results indicates an inefficient - // Query operation. For more information, see Count and ScannedCount (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Count) - // in the Amazon DynamoDB Developer Guide. - // - // If you did not use a filter in the request, then ScannedCount is the same - // as Count. - ScannedCount *int64 `type:"integer"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s QueryOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s QueryOutput) GoString() string { - return s.String() -} - -// SetConsumedCapacity sets the ConsumedCapacity field's value. -func (s *QueryOutput) SetConsumedCapacity(v *ConsumedCapacity) *QueryOutput { - s.ConsumedCapacity = v - return s -} - -// SetCount sets the Count field's value. -func (s *QueryOutput) SetCount(v int64) *QueryOutput { - s.Count = &v - return s -} - -// SetItems sets the Items field's value. -func (s *QueryOutput) SetItems(v []map[string]*AttributeValue) *QueryOutput { - s.Items = v - return s -} - -// SetLastEvaluatedKey sets the LastEvaluatedKey field's value. -func (s *QueryOutput) SetLastEvaluatedKey(v map[string]*AttributeValue) *QueryOutput { - s.LastEvaluatedKey = v - return s -} - -// SetScannedCount sets the ScannedCount field's value. -func (s *QueryOutput) SetScannedCount(v int64) *QueryOutput { - s.ScannedCount = &v - return s -} - -// Represents the properties of a replica. 
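The `LastEvaluatedKey` contract documented above is the part callers most often get wrong: an empty key is the *only* reliable end-of-results signal. A minimal sketch of the manual pagination loop, assuming a helper package and an already-built `QueryInput` (v1 also ships `QueryPages` for the same thing):

```go
package dynamoutil

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// queryAll drains every page of a Query by feeding LastEvaluatedKey back
// in as ExclusiveStartKey until it comes back empty.
func queryAll(svc *dynamodb.DynamoDB, in *dynamodb.QueryInput) ([]map[string]*dynamodb.AttributeValue, error) {
	var items []map[string]*dynamodb.AttributeValue
	for {
		out, err := svc.Query(in)
		if err != nil {
			return nil, err
		}
		items = append(items, out.Items...)
		// With a filter, ScannedCount >= Count; a large gap signals an
		// inefficient query, per the field docs above.
		fmt.Printf("page: count=%d scanned=%d\n",
			aws.Int64Value(out.Count), aws.Int64Value(out.ScannedCount))
		if len(out.LastEvaluatedKey) == 0 {
			return items, nil
		}
		in.ExclusiveStartKey = out.LastEvaluatedKey
	}
}
```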
-type Replica struct { - _ struct{} `type:"structure"` - - // The Region where the replica needs to be created. - RegionName *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Replica) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Replica) GoString() string { - return s.String() -} - -// SetRegionName sets the RegionName field's value. -func (s *Replica) SetRegionName(v string) *Replica { - s.RegionName = &v - return s -} - -// The specified replica is already part of the global table. -type ReplicaAlreadyExistsException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicaAlreadyExistsException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicaAlreadyExistsException) GoString() string { - return s.String() -} - -func newErrorReplicaAlreadyExistsException(v protocol.ResponseMetadata) error { - return &ReplicaAlreadyExistsException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *ReplicaAlreadyExistsException) Code() string { - return "ReplicaAlreadyExistsException" -} - -// Message returns the exception's message. -func (s *ReplicaAlreadyExistsException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *ReplicaAlreadyExistsException) OrigErr() error { - return nil -} - -func (s *ReplicaAlreadyExistsException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *ReplicaAlreadyExistsException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *ReplicaAlreadyExistsException) RequestID() string { - return s.RespMetadata.RequestID -} - -// Represents the auto scaling settings of the replica. -type ReplicaAutoScalingDescription struct { - _ struct{} `type:"structure"` - - // Replica-specific global secondary index auto scaling settings. - GlobalSecondaryIndexes []*ReplicaGlobalSecondaryIndexAutoScalingDescription `type:"list"` - - // The Region where the replica exists. - RegionName *string `type:"string"` - - // Represents the auto scaling settings for a global table or global secondary - // index. 
- ReplicaProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"` - - // Represents the auto scaling settings for a global table or global secondary - // index. - ReplicaProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"` - - // The current state of the replica: - // - // * CREATING - The replica is being created. - // - // * UPDATING - The replica is being updated. - // - // * DELETING - The replica is being deleted. - // - // * ACTIVE - The replica is ready for use. - ReplicaStatus *string `type:"string" enum:"ReplicaStatus"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicaAutoScalingDescription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicaAutoScalingDescription) GoString() string { - return s.String() -} - -// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. -func (s *ReplicaAutoScalingDescription) SetGlobalSecondaryIndexes(v []*ReplicaGlobalSecondaryIndexAutoScalingDescription) *ReplicaAutoScalingDescription { - s.GlobalSecondaryIndexes = v - return s -} - -// SetRegionName sets the RegionName field's value. -func (s *ReplicaAutoScalingDescription) SetRegionName(v string) *ReplicaAutoScalingDescription { - s.RegionName = &v - return s -} - -// SetReplicaProvisionedReadCapacityAutoScalingSettings sets the ReplicaProvisionedReadCapacityAutoScalingSettings field's value. -func (s *ReplicaAutoScalingDescription) SetReplicaProvisionedReadCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaAutoScalingDescription { - s.ReplicaProvisionedReadCapacityAutoScalingSettings = v - return s -} - -// SetReplicaProvisionedWriteCapacityAutoScalingSettings sets the ReplicaProvisionedWriteCapacityAutoScalingSettings field's value. -func (s *ReplicaAutoScalingDescription) SetReplicaProvisionedWriteCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaAutoScalingDescription { - s.ReplicaProvisionedWriteCapacityAutoScalingSettings = v - return s -} - -// SetReplicaStatus sets the ReplicaStatus field's value. -func (s *ReplicaAutoScalingDescription) SetReplicaStatus(v string) *ReplicaAutoScalingDescription { - s.ReplicaStatus = &v - return s -} - -// Represents the auto scaling settings of a replica that will be modified. -type ReplicaAutoScalingUpdate struct { - _ struct{} `type:"structure"` - - // The Region where the replica exists. - // - // RegionName is a required field - RegionName *string `type:"string" required:"true"` - - // Represents the auto scaling settings of global secondary indexes that will - // be modified. - ReplicaGlobalSecondaryIndexUpdates []*ReplicaGlobalSecondaryIndexAutoScalingUpdate `type:"list"` - - // Represents the auto scaling settings to be modified for a global table or - // global secondary index. - ReplicaProvisionedReadCapacityAutoScalingUpdate *AutoScalingSettingsUpdate `type:"structure"` -} - -// String returns the string representation. 
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaAutoScalingUpdate) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaAutoScalingUpdate) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ReplicaAutoScalingUpdate) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "ReplicaAutoScalingUpdate"}
-	if s.RegionName == nil {
-		invalidParams.Add(request.NewErrParamRequired("RegionName"))
-	}
-	if s.ReplicaGlobalSecondaryIndexUpdates != nil {
-		for i, v := range s.ReplicaGlobalSecondaryIndexUpdates {
-			if v == nil {
-				continue
-			}
-			if err := v.Validate(); err != nil {
-				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaGlobalSecondaryIndexUpdates", i), err.(request.ErrInvalidParams))
-			}
-		}
-	}
-	if s.ReplicaProvisionedReadCapacityAutoScalingUpdate != nil {
-		if err := s.ReplicaProvisionedReadCapacityAutoScalingUpdate.Validate(); err != nil {
-			invalidParams.AddNested("ReplicaProvisionedReadCapacityAutoScalingUpdate", err.(request.ErrInvalidParams))
-		}
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetRegionName sets the RegionName field's value.
-func (s *ReplicaAutoScalingUpdate) SetRegionName(v string) *ReplicaAutoScalingUpdate {
-	s.RegionName = &v
-	return s
-}
-
-// SetReplicaGlobalSecondaryIndexUpdates sets the ReplicaGlobalSecondaryIndexUpdates field's value.
-func (s *ReplicaAutoScalingUpdate) SetReplicaGlobalSecondaryIndexUpdates(v []*ReplicaGlobalSecondaryIndexAutoScalingUpdate) *ReplicaAutoScalingUpdate {
-	s.ReplicaGlobalSecondaryIndexUpdates = v
-	return s
-}
-
-// SetReplicaProvisionedReadCapacityAutoScalingUpdate sets the ReplicaProvisionedReadCapacityAutoScalingUpdate field's value.
-func (s *ReplicaAutoScalingUpdate) SetReplicaProvisionedReadCapacityAutoScalingUpdate(v *AutoScalingSettingsUpdate) *ReplicaAutoScalingUpdate {
-	s.ReplicaProvisionedReadCapacityAutoScalingUpdate = v
-	return s
-}
-
-// Contains the details of the replica.
-type ReplicaDescription struct {
-	_ struct{} `type:"structure"`
-
-	// Replica-specific global secondary index settings.
-	GlobalSecondaryIndexes []*ReplicaGlobalSecondaryIndexDescription `type:"list"`
-
-	// The KMS key of the replica that will be used for KMS encryption.
-	KMSMasterKeyId *string `type:"string"`
-
-	// Overrides the maximum on-demand throughput settings for the specified replica
-	// table.
-	OnDemandThroughputOverride *OnDemandThroughputOverride `type:"structure"`
-
-	// Replica-specific provisioned throughput. If not described, uses the source
-	// table's provisioned throughput settings.
-	ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"`
-
-	// The name of the Region.
-	RegionName *string `type:"string"`
-
-	// The time at which the replica was first detected as inaccessible. To determine
-	// the cause of inaccessibility, check the ReplicaStatus property.
-	ReplicaInaccessibleDateTime *time.Time `type:"timestamp"`
-
-	// The current state of the replica:
-	//
-	//    * CREATING - The replica is being created.
- // - // * UPDATING - The replica is being updated. - // - // * DELETING - The replica is being deleted. - // - // * ACTIVE - The replica is ready for use. - // - // * REGION_DISABLED - The replica is inaccessible because the Amazon Web - // Services Region has been disabled. If the Amazon Web Services Region remains - // inaccessible for more than 20 hours, DynamoDB will remove this replica - // from the replication group. The replica will not be deleted and replication - // will stop from and to this region. - // - // * INACCESSIBLE_ENCRYPTION_CREDENTIALS - The KMS key used to encrypt the - // table is inaccessible. If the KMS key remains inaccessible for more than - // 20 hours, DynamoDB will remove this replica from the replication group. - // The replica will not be deleted and replication will stop from and to - // this region. - ReplicaStatus *string `type:"string" enum:"ReplicaStatus"` - - // Detailed information about the replica status. - ReplicaStatusDescription *string `type:"string"` - - // Specifies the progress of a Create, Update, or Delete action on the replica - // as a percentage. - ReplicaStatusPercentProgress *string `type:"string"` - - // Contains details of the table class. - ReplicaTableClassSummary *TableClassSummary `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicaDescription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicaDescription) GoString() string { - return s.String() -} - -// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. -func (s *ReplicaDescription) SetGlobalSecondaryIndexes(v []*ReplicaGlobalSecondaryIndexDescription) *ReplicaDescription { - s.GlobalSecondaryIndexes = v - return s -} - -// SetKMSMasterKeyId sets the KMSMasterKeyId field's value. -func (s *ReplicaDescription) SetKMSMasterKeyId(v string) *ReplicaDescription { - s.KMSMasterKeyId = &v - return s -} - -// SetOnDemandThroughputOverride sets the OnDemandThroughputOverride field's value. -func (s *ReplicaDescription) SetOnDemandThroughputOverride(v *OnDemandThroughputOverride) *ReplicaDescription { - s.OnDemandThroughputOverride = v - return s -} - -// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. -func (s *ReplicaDescription) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *ReplicaDescription { - s.ProvisionedThroughputOverride = v - return s -} - -// SetRegionName sets the RegionName field's value. -func (s *ReplicaDescription) SetRegionName(v string) *ReplicaDescription { - s.RegionName = &v - return s -} - -// SetReplicaInaccessibleDateTime sets the ReplicaInaccessibleDateTime field's value. -func (s *ReplicaDescription) SetReplicaInaccessibleDateTime(v time.Time) *ReplicaDescription { - s.ReplicaInaccessibleDateTime = &v - return s -} - -// SetReplicaStatus sets the ReplicaStatus field's value. -func (s *ReplicaDescription) SetReplicaStatus(v string) *ReplicaDescription { - s.ReplicaStatus = &v - return s -} - -// SetReplicaStatusDescription sets the ReplicaStatusDescription field's value. 
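For context on the `ReplicaStatus` and `ReplicaInaccessibleDateTime` semantics above, a minimal sketch of inspecting replica health via `DescribeTable` against this v1 surface (the table name `Music` is a hypothetical placeholder):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.DescribeTable(&dynamodb.DescribeTableInput{
		TableName: aws.String("Music"), // hypothetical table
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range out.Table.Replicas {
		fmt.Printf("%s: %s (%s%%)\n",
			aws.StringValue(r.RegionName),
			aws.StringValue(r.ReplicaStatus),
			aws.StringValue(r.ReplicaStatusPercentProgress))
		// A non-nil ReplicaInaccessibleDateTime means the 20-hour removal
		// clock described in the field docs above is running.
		if r.ReplicaInaccessibleDateTime != nil {
			fmt.Println("  inaccessible since:", aws.TimeValue(r.ReplicaInaccessibleDateTime))
		}
	}
}
```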
-func (s *ReplicaDescription) SetReplicaStatusDescription(v string) *ReplicaDescription { - s.ReplicaStatusDescription = &v - return s -} - -// SetReplicaStatusPercentProgress sets the ReplicaStatusPercentProgress field's value. -func (s *ReplicaDescription) SetReplicaStatusPercentProgress(v string) *ReplicaDescription { - s.ReplicaStatusPercentProgress = &v - return s -} - -// SetReplicaTableClassSummary sets the ReplicaTableClassSummary field's value. -func (s *ReplicaDescription) SetReplicaTableClassSummary(v *TableClassSummary) *ReplicaDescription { - s.ReplicaTableClassSummary = v - return s -} - -// Represents the properties of a replica global secondary index. -type ReplicaGlobalSecondaryIndex struct { - _ struct{} `type:"structure"` - - // The name of the global secondary index. - // - // IndexName is a required field - IndexName *string `min:"3" type:"string" required:"true"` - - // Overrides the maximum on-demand throughput settings for the specified global - // secondary index in the specified replica table. - OnDemandThroughputOverride *OnDemandThroughputOverride `type:"structure"` - - // Replica table GSI-specific provisioned throughput. If not specified, uses - // the source table GSI's read capacity settings. - ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicaGlobalSecondaryIndex) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicaGlobalSecondaryIndex) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ReplicaGlobalSecondaryIndex) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReplicaGlobalSecondaryIndex"} - if s.IndexName == nil { - invalidParams.Add(request.NewErrParamRequired("IndexName")) - } - if s.IndexName != nil && len(*s.IndexName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) - } - if s.ProvisionedThroughputOverride != nil { - if err := s.ProvisionedThroughputOverride.Validate(); err != nil { - invalidParams.AddNested("ProvisionedThroughputOverride", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetIndexName sets the IndexName field's value. -func (s *ReplicaGlobalSecondaryIndex) SetIndexName(v string) *ReplicaGlobalSecondaryIndex { - s.IndexName = &v - return s -} - -// SetOnDemandThroughputOverride sets the OnDemandThroughputOverride field's value. -func (s *ReplicaGlobalSecondaryIndex) SetOnDemandThroughputOverride(v *OnDemandThroughputOverride) *ReplicaGlobalSecondaryIndex { - s.OnDemandThroughputOverride = v - return s -} - -// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. -func (s *ReplicaGlobalSecondaryIndex) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *ReplicaGlobalSecondaryIndex { - s.ProvisionedThroughputOverride = v - return s -} - -// Represents the auto scaling configuration for a replica global secondary -// index. 
-type ReplicaGlobalSecondaryIndexAutoScalingDescription struct { - _ struct{} `type:"structure"` - - // The name of the global secondary index. - IndexName *string `min:"3" type:"string"` - - // The current state of the replica global secondary index: - // - // * CREATING - The index is being created. - // - // * UPDATING - The table/index configuration is being updated. The table/index - // remains available for data operations when UPDATING - // - // * DELETING - The index is being deleted. - // - // * ACTIVE - The index is ready for use. - IndexStatus *string `type:"string" enum:"IndexStatus"` - - // Represents the auto scaling settings for a global table or global secondary - // index. - ProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"` - - // Represents the auto scaling settings for a global table or global secondary - // index. - ProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicaGlobalSecondaryIndexAutoScalingDescription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicaGlobalSecondaryIndexAutoScalingDescription) GoString() string { - return s.String() -} - -// SetIndexName sets the IndexName field's value. -func (s *ReplicaGlobalSecondaryIndexAutoScalingDescription) SetIndexName(v string) *ReplicaGlobalSecondaryIndexAutoScalingDescription { - s.IndexName = &v - return s -} - -// SetIndexStatus sets the IndexStatus field's value. -func (s *ReplicaGlobalSecondaryIndexAutoScalingDescription) SetIndexStatus(v string) *ReplicaGlobalSecondaryIndexAutoScalingDescription { - s.IndexStatus = &v - return s -} - -// SetProvisionedReadCapacityAutoScalingSettings sets the ProvisionedReadCapacityAutoScalingSettings field's value. -func (s *ReplicaGlobalSecondaryIndexAutoScalingDescription) SetProvisionedReadCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaGlobalSecondaryIndexAutoScalingDescription { - s.ProvisionedReadCapacityAutoScalingSettings = v - return s -} - -// SetProvisionedWriteCapacityAutoScalingSettings sets the ProvisionedWriteCapacityAutoScalingSettings field's value. -func (s *ReplicaGlobalSecondaryIndexAutoScalingDescription) SetProvisionedWriteCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaGlobalSecondaryIndexAutoScalingDescription { - s.ProvisionedWriteCapacityAutoScalingSettings = v - return s -} - -// Represents the auto scaling settings of a global secondary index for a replica -// that will be modified. -type ReplicaGlobalSecondaryIndexAutoScalingUpdate struct { - _ struct{} `type:"structure"` - - // The name of the global secondary index. - IndexName *string `min:"3" type:"string"` - - // Represents the auto scaling settings to be modified for a global table or - // global secondary index. - ProvisionedReadCapacityAutoScalingUpdate *AutoScalingSettingsUpdate `type:"structure"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicaGlobalSecondaryIndexAutoScalingUpdate) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicaGlobalSecondaryIndexAutoScalingUpdate) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ReplicaGlobalSecondaryIndexAutoScalingUpdate) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReplicaGlobalSecondaryIndexAutoScalingUpdate"} - if s.IndexName != nil && len(*s.IndexName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) - } - if s.ProvisionedReadCapacityAutoScalingUpdate != nil { - if err := s.ProvisionedReadCapacityAutoScalingUpdate.Validate(); err != nil { - invalidParams.AddNested("ProvisionedReadCapacityAutoScalingUpdate", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetIndexName sets the IndexName field's value. -func (s *ReplicaGlobalSecondaryIndexAutoScalingUpdate) SetIndexName(v string) *ReplicaGlobalSecondaryIndexAutoScalingUpdate { - s.IndexName = &v - return s -} - -// SetProvisionedReadCapacityAutoScalingUpdate sets the ProvisionedReadCapacityAutoScalingUpdate field's value. -func (s *ReplicaGlobalSecondaryIndexAutoScalingUpdate) SetProvisionedReadCapacityAutoScalingUpdate(v *AutoScalingSettingsUpdate) *ReplicaGlobalSecondaryIndexAutoScalingUpdate { - s.ProvisionedReadCapacityAutoScalingUpdate = v - return s -} - -// Represents the properties of a replica global secondary index. -type ReplicaGlobalSecondaryIndexDescription struct { - _ struct{} `type:"structure"` - - // The name of the global secondary index. - IndexName *string `min:"3" type:"string"` - - // Overrides the maximum on-demand throughput for the specified global secondary - // index in the specified replica table. - OnDemandThroughputOverride *OnDemandThroughputOverride `type:"structure"` - - // If not described, uses the source table GSI's read capacity settings. - ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicaGlobalSecondaryIndexDescription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicaGlobalSecondaryIndexDescription) GoString() string { - return s.String() -} - -// SetIndexName sets the IndexName field's value. -func (s *ReplicaGlobalSecondaryIndexDescription) SetIndexName(v string) *ReplicaGlobalSecondaryIndexDescription { - s.IndexName = &v - return s -} - -// SetOnDemandThroughputOverride sets the OnDemandThroughputOverride field's value. 
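On the write side, `ReplicaAutoScalingUpdate` and `ReplicaGlobalSecondaryIndexAutoScalingUpdate` feed `UpdateTableReplicaAutoScaling`. A hedged sketch with hypothetical table, index, Region, and capacity numbers:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// Target-tracking read auto scaling, reused for the replica and one GSI.
	readScaling := &dynamodb.AutoScalingSettingsUpdate{
		MinimumUnits: aws.Int64(5),
		MaximumUnits: aws.Int64(100),
		ScalingPolicyUpdate: &dynamodb.AutoScalingPolicyUpdate{
			TargetTrackingScalingPolicyConfiguration: &dynamodb.AutoScalingTargetTrackingScalingPolicyConfigurationUpdate{
				TargetValue: aws.Float64(70), // percent utilization
			},
		},
	}
	_, err := svc.UpdateTableReplicaAutoScaling(&dynamodb.UpdateTableReplicaAutoScalingInput{
		TableName: aws.String("Music"), // hypothetical table
		ReplicaUpdates: []*dynamodb.ReplicaAutoScalingUpdate{{
			RegionName: aws.String("us-west-2"),
			ReplicaProvisionedReadCapacityAutoScalingUpdate: readScaling,
			ReplicaGlobalSecondaryIndexUpdates: []*dynamodb.ReplicaGlobalSecondaryIndexAutoScalingUpdate{{
				IndexName:                                aws.String("ArtistIndex"), // hypothetical GSI
				ProvisionedReadCapacityAutoScalingUpdate: readScaling,
			}},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```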
-func (s *ReplicaGlobalSecondaryIndexDescription) SetOnDemandThroughputOverride(v *OnDemandThroughputOverride) *ReplicaGlobalSecondaryIndexDescription { - s.OnDemandThroughputOverride = v - return s -} - -// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. -func (s *ReplicaGlobalSecondaryIndexDescription) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *ReplicaGlobalSecondaryIndexDescription { - s.ProvisionedThroughputOverride = v - return s -} - -// Represents the properties of a global secondary index. -type ReplicaGlobalSecondaryIndexSettingsDescription struct { - _ struct{} `type:"structure"` - - // The name of the global secondary index. The name must be unique among all - // other indexes on this table. - // - // IndexName is a required field - IndexName *string `min:"3" type:"string" required:"true"` - - // The current status of the global secondary index: - // - // * CREATING - The global secondary index is being created. - // - // * UPDATING - The global secondary index is being updated. - // - // * DELETING - The global secondary index is being deleted. - // - // * ACTIVE - The global secondary index is ready for use. - IndexStatus *string `type:"string" enum:"IndexStatus"` - - // Auto scaling settings for a global secondary index replica's read capacity - // units. - ProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"` - - // The maximum number of strongly consistent reads consumed per second before - // DynamoDB returns a ThrottlingException. - ProvisionedReadCapacityUnits *int64 `min:"1" type:"long"` - - // Auto scaling settings for a global secondary index replica's write capacity - // units. - ProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"` - - // The maximum number of writes consumed per second before DynamoDB returns - // a ThrottlingException. - ProvisionedWriteCapacityUnits *int64 `min:"1" type:"long"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicaGlobalSecondaryIndexSettingsDescription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicaGlobalSecondaryIndexSettingsDescription) GoString() string { - return s.String() -} - -// SetIndexName sets the IndexName field's value. -func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetIndexName(v string) *ReplicaGlobalSecondaryIndexSettingsDescription { - s.IndexName = &v - return s -} - -// SetIndexStatus sets the IndexStatus field's value. -func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetIndexStatus(v string) *ReplicaGlobalSecondaryIndexSettingsDescription { - s.IndexStatus = &v - return s -} - -// SetProvisionedReadCapacityAutoScalingSettings sets the ProvisionedReadCapacityAutoScalingSettings field's value. 
-func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetProvisionedReadCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaGlobalSecondaryIndexSettingsDescription { - s.ProvisionedReadCapacityAutoScalingSettings = v - return s -} - -// SetProvisionedReadCapacityUnits sets the ProvisionedReadCapacityUnits field's value. -func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetProvisionedReadCapacityUnits(v int64) *ReplicaGlobalSecondaryIndexSettingsDescription { - s.ProvisionedReadCapacityUnits = &v - return s -} - -// SetProvisionedWriteCapacityAutoScalingSettings sets the ProvisionedWriteCapacityAutoScalingSettings field's value. -func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetProvisionedWriteCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaGlobalSecondaryIndexSettingsDescription { - s.ProvisionedWriteCapacityAutoScalingSettings = v - return s -} - -// SetProvisionedWriteCapacityUnits sets the ProvisionedWriteCapacityUnits field's value. -func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetProvisionedWriteCapacityUnits(v int64) *ReplicaGlobalSecondaryIndexSettingsDescription { - s.ProvisionedWriteCapacityUnits = &v - return s -} - -// Represents the settings of a global secondary index for a global table that -// will be modified. -type ReplicaGlobalSecondaryIndexSettingsUpdate struct { - _ struct{} `type:"structure"` - - // The name of the global secondary index. The name must be unique among all - // other indexes on this table. - // - // IndexName is a required field - IndexName *string `min:"3" type:"string" required:"true"` - - // Auto scaling settings for managing a global secondary index replica's read - // capacity units. - ProvisionedReadCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"` - - // The maximum number of strongly consistent reads consumed per second before - // DynamoDB returns a ThrottlingException. - ProvisionedReadCapacityUnits *int64 `min:"1" type:"long"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicaGlobalSecondaryIndexSettingsUpdate) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicaGlobalSecondaryIndexSettingsUpdate) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ReplicaGlobalSecondaryIndexSettingsUpdate) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReplicaGlobalSecondaryIndexSettingsUpdate"} - if s.IndexName == nil { - invalidParams.Add(request.NewErrParamRequired("IndexName")) - } - if s.IndexName != nil && len(*s.IndexName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) - } - if s.ProvisionedReadCapacityUnits != nil && *s.ProvisionedReadCapacityUnits < 1 { - invalidParams.Add(request.NewErrParamMinValue("ProvisionedReadCapacityUnits", 1)) - } - if s.ProvisionedReadCapacityAutoScalingSettingsUpdate != nil { - if err := s.ProvisionedReadCapacityAutoScalingSettingsUpdate.Validate(); err != nil { - invalidParams.AddNested("ProvisionedReadCapacityAutoScalingSettingsUpdate", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetIndexName sets the IndexName field's value. -func (s *ReplicaGlobalSecondaryIndexSettingsUpdate) SetIndexName(v string) *ReplicaGlobalSecondaryIndexSettingsUpdate { - s.IndexName = &v - return s -} - -// SetProvisionedReadCapacityAutoScalingSettingsUpdate sets the ProvisionedReadCapacityAutoScalingSettingsUpdate field's value. -func (s *ReplicaGlobalSecondaryIndexSettingsUpdate) SetProvisionedReadCapacityAutoScalingSettingsUpdate(v *AutoScalingSettingsUpdate) *ReplicaGlobalSecondaryIndexSettingsUpdate { - s.ProvisionedReadCapacityAutoScalingSettingsUpdate = v - return s -} - -// SetProvisionedReadCapacityUnits sets the ProvisionedReadCapacityUnits field's value. -func (s *ReplicaGlobalSecondaryIndexSettingsUpdate) SetProvisionedReadCapacityUnits(v int64) *ReplicaGlobalSecondaryIndexSettingsUpdate { - s.ProvisionedReadCapacityUnits = &v - return s -} - -// The specified replica is no longer part of the global table. -type ReplicaNotFoundException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicaNotFoundException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicaNotFoundException) GoString() string { - return s.String() -} - -func newErrorReplicaNotFoundException(v protocol.ResponseMetadata) error { - return &ReplicaNotFoundException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *ReplicaNotFoundException) Code() string { - return "ReplicaNotFoundException" -} - -// Message returns the exception's message. -func (s *ReplicaNotFoundException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *ReplicaNotFoundException) OrigErr() error { - return nil -} - -func (s *ReplicaNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. 
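The modeled exceptions above (`ReplicaNotFoundException`, `ReplicaAlreadyExistsException`, and the others below) all satisfy `awserr.Error`, so the v1 idiom is to type-assert and switch on `Code()`. A small sketch, with the idempotency comments being suggestions rather than anything this diff prescribes:

```go
package dynamoutil

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// classify shows the v1 error-handling idiom for the modeled exceptions.
func classify(err error) {
	aerr, ok := err.(awserr.Error)
	if !ok {
		log.Println("not an AWS error:", err)
		return
	}
	switch aerr.Code() {
	case dynamodb.ErrCodeReplicaNotFoundException:
		// Often treated as success when deleting a replica idempotently.
		log.Println("replica already gone")
	case dynamodb.ErrCodeReplicaAlreadyExistsException:
		// Often treated as success when creating a replica idempotently.
		log.Println("replica already present")
	default:
		log.Println(aerr.Code(), aerr.Message())
	}
}
```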
-func (s *ReplicaNotFoundException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *ReplicaNotFoundException) RequestID() string { - return s.RespMetadata.RequestID -} - -// Represents the properties of a replica. -type ReplicaSettingsDescription struct { - _ struct{} `type:"structure"` - - // The Region name of the replica. - // - // RegionName is a required field - RegionName *string `type:"string" required:"true"` - - // The read/write capacity mode of the replica. - ReplicaBillingModeSummary *BillingModeSummary `type:"structure"` - - // Replica global secondary index settings for the global table. - ReplicaGlobalSecondaryIndexSettings []*ReplicaGlobalSecondaryIndexSettingsDescription `type:"list"` - - // Auto scaling settings for a global table replica's read capacity units. - ReplicaProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"` - - // The maximum number of strongly consistent reads consumed per second before - // DynamoDB returns a ThrottlingException. For more information, see Specifying - // Read and Write Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) - // in the Amazon DynamoDB Developer Guide. - ReplicaProvisionedReadCapacityUnits *int64 `type:"long"` - - // Auto scaling settings for a global table replica's write capacity units. - ReplicaProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"` - - // The maximum number of writes consumed per second before DynamoDB returns - // a ThrottlingException. For more information, see Specifying Read and Write - // Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) - // in the Amazon DynamoDB Developer Guide. - ReplicaProvisionedWriteCapacityUnits *int64 `type:"long"` - - // The current state of the Region: - // - // * CREATING - The Region is being created. - // - // * UPDATING - The Region is being updated. - // - // * DELETING - The Region is being deleted. - // - // * ACTIVE - The Region is ready for use. - ReplicaStatus *string `type:"string" enum:"ReplicaStatus"` - - // Contains details of the table class. - ReplicaTableClassSummary *TableClassSummary `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicaSettingsDescription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicaSettingsDescription) GoString() string { - return s.String() -} - -// SetRegionName sets the RegionName field's value. -func (s *ReplicaSettingsDescription) SetRegionName(v string) *ReplicaSettingsDescription { - s.RegionName = &v - return s -} - -// SetReplicaBillingModeSummary sets the ReplicaBillingModeSummary field's value. 
-func (s *ReplicaSettingsDescription) SetReplicaBillingModeSummary(v *BillingModeSummary) *ReplicaSettingsDescription { - s.ReplicaBillingModeSummary = v - return s -} - -// SetReplicaGlobalSecondaryIndexSettings sets the ReplicaGlobalSecondaryIndexSettings field's value. -func (s *ReplicaSettingsDescription) SetReplicaGlobalSecondaryIndexSettings(v []*ReplicaGlobalSecondaryIndexSettingsDescription) *ReplicaSettingsDescription { - s.ReplicaGlobalSecondaryIndexSettings = v - return s -} - -// SetReplicaProvisionedReadCapacityAutoScalingSettings sets the ReplicaProvisionedReadCapacityAutoScalingSettings field's value. -func (s *ReplicaSettingsDescription) SetReplicaProvisionedReadCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaSettingsDescription { - s.ReplicaProvisionedReadCapacityAutoScalingSettings = v - return s -} - -// SetReplicaProvisionedReadCapacityUnits sets the ReplicaProvisionedReadCapacityUnits field's value. -func (s *ReplicaSettingsDescription) SetReplicaProvisionedReadCapacityUnits(v int64) *ReplicaSettingsDescription { - s.ReplicaProvisionedReadCapacityUnits = &v - return s -} - -// SetReplicaProvisionedWriteCapacityAutoScalingSettings sets the ReplicaProvisionedWriteCapacityAutoScalingSettings field's value. -func (s *ReplicaSettingsDescription) SetReplicaProvisionedWriteCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaSettingsDescription { - s.ReplicaProvisionedWriteCapacityAutoScalingSettings = v - return s -} - -// SetReplicaProvisionedWriteCapacityUnits sets the ReplicaProvisionedWriteCapacityUnits field's value. -func (s *ReplicaSettingsDescription) SetReplicaProvisionedWriteCapacityUnits(v int64) *ReplicaSettingsDescription { - s.ReplicaProvisionedWriteCapacityUnits = &v - return s -} - -// SetReplicaStatus sets the ReplicaStatus field's value. -func (s *ReplicaSettingsDescription) SetReplicaStatus(v string) *ReplicaSettingsDescription { - s.ReplicaStatus = &v - return s -} - -// SetReplicaTableClassSummary sets the ReplicaTableClassSummary field's value. -func (s *ReplicaSettingsDescription) SetReplicaTableClassSummary(v *TableClassSummary) *ReplicaSettingsDescription { - s.ReplicaTableClassSummary = v - return s -} - -// Represents the settings for a global table in a Region that will be modified. -type ReplicaSettingsUpdate struct { - _ struct{} `type:"structure"` - - // The Region of the replica to be added. - // - // RegionName is a required field - RegionName *string `type:"string" required:"true"` - - // Represents the settings of a global secondary index for a global table that - // will be modified. - ReplicaGlobalSecondaryIndexSettingsUpdate []*ReplicaGlobalSecondaryIndexSettingsUpdate `min:"1" type:"list"` - - // Auto scaling settings for managing a global table replica's read capacity - // units. - ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"` - - // The maximum number of strongly consistent reads consumed per second before - // DynamoDB returns a ThrottlingException. For more information, see Specifying - // Read and Write Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) - // in the Amazon DynamoDB Developer Guide. - ReplicaProvisionedReadCapacityUnits *int64 `min:"1" type:"long"` - - // Replica-specific table class. If not specified, uses the source table's table - // class. 
- ReplicaTableClass *string `type:"string" enum:"TableClass"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicaSettingsUpdate) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicaSettingsUpdate) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ReplicaSettingsUpdate) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReplicaSettingsUpdate"} - if s.RegionName == nil { - invalidParams.Add(request.NewErrParamRequired("RegionName")) - } - if s.ReplicaGlobalSecondaryIndexSettingsUpdate != nil && len(s.ReplicaGlobalSecondaryIndexSettingsUpdate) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ReplicaGlobalSecondaryIndexSettingsUpdate", 1)) - } - if s.ReplicaProvisionedReadCapacityUnits != nil && *s.ReplicaProvisionedReadCapacityUnits < 1 { - invalidParams.Add(request.NewErrParamMinValue("ReplicaProvisionedReadCapacityUnits", 1)) - } - if s.ReplicaGlobalSecondaryIndexSettingsUpdate != nil { - for i, v := range s.ReplicaGlobalSecondaryIndexSettingsUpdate { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaGlobalSecondaryIndexSettingsUpdate", i), err.(request.ErrInvalidParams)) - } - } - } - if s.ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate != nil { - if err := s.ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate.Validate(); err != nil { - invalidParams.AddNested("ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetRegionName sets the RegionName field's value. -func (s *ReplicaSettingsUpdate) SetRegionName(v string) *ReplicaSettingsUpdate { - s.RegionName = &v - return s -} - -// SetReplicaGlobalSecondaryIndexSettingsUpdate sets the ReplicaGlobalSecondaryIndexSettingsUpdate field's value. -func (s *ReplicaSettingsUpdate) SetReplicaGlobalSecondaryIndexSettingsUpdate(v []*ReplicaGlobalSecondaryIndexSettingsUpdate) *ReplicaSettingsUpdate { - s.ReplicaGlobalSecondaryIndexSettingsUpdate = v - return s -} - -// SetReplicaProvisionedReadCapacityAutoScalingSettingsUpdate sets the ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate field's value. -func (s *ReplicaSettingsUpdate) SetReplicaProvisionedReadCapacityAutoScalingSettingsUpdate(v *AutoScalingSettingsUpdate) *ReplicaSettingsUpdate { - s.ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate = v - return s -} - -// SetReplicaProvisionedReadCapacityUnits sets the ReplicaProvisionedReadCapacityUnits field's value. -func (s *ReplicaSettingsUpdate) SetReplicaProvisionedReadCapacityUnits(v int64) *ReplicaSettingsUpdate { - s.ReplicaProvisionedReadCapacityUnits = &v - return s -} - -// SetReplicaTableClass sets the ReplicaTableClass field's value. 
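`ReplicaSettingsUpdate` is consumed by `UpdateGlobalTableSettings` (the legacy global-tables settings API). A minimal sketch with hypothetical names and capacity values:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	_, err := svc.UpdateGlobalTableSettings(&dynamodb.UpdateGlobalTableSettingsInput{
		GlobalTableName: aws.String("Music"), // hypothetical global table
		ReplicaSettingsUpdate: []*dynamodb.ReplicaSettingsUpdate{{
			RegionName:                          aws.String("us-west-2"),
			ReplicaProvisionedReadCapacityUnits: aws.Int64(50),
			// Switch this replica to the infrequent-access table class.
			ReplicaTableClass: aws.String(dynamodb.TableClassStandardInfrequentAccess),
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```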
-func (s *ReplicaSettingsUpdate) SetReplicaTableClass(v string) *ReplicaSettingsUpdate {
-	s.ReplicaTableClass = &v
-	return s
-}
-
-// Represents one of the following:
-//
-//   - A new replica to be added to an existing global table.
-//
-//   - New parameters for an existing replica.
-//
-//   - An existing replica to be removed from an existing global table.
-type ReplicaUpdate struct {
-	_ struct{} `type:"structure"`
-
-	// The parameters required for creating a replica on an existing global table.
-	Create *CreateReplicaAction `type:"structure"`
-
-	// The name of the existing replica to be removed.
-	Delete *DeleteReplicaAction `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaUpdate) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaUpdate) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ReplicaUpdate) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "ReplicaUpdate"}
-	if s.Create != nil {
-		if err := s.Create.Validate(); err != nil {
-			invalidParams.AddNested("Create", err.(request.ErrInvalidParams))
-		}
-	}
-	if s.Delete != nil {
-		if err := s.Delete.Validate(); err != nil {
-			invalidParams.AddNested("Delete", err.(request.ErrInvalidParams))
-		}
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetCreate sets the Create field's value.
-func (s *ReplicaUpdate) SetCreate(v *CreateReplicaAction) *ReplicaUpdate {
-	s.Create = v
-	return s
-}
-
-// SetDelete sets the Delete field's value.
-func (s *ReplicaUpdate) SetDelete(v *DeleteReplicaAction) *ReplicaUpdate {
-	s.Delete = v
-	return s
-}
-
-// Represents one of the following:
-//
-//   - A new replica to be added to an existing regional table or global table.
-//     This request invokes the CreateTableReplica action in the destination
-//     Region.
-//
-//   - New parameters for an existing replica. This request invokes the UpdateTable
-//     action in the destination Region.
-//
-//   - An existing replica to be deleted. The request invokes the DeleteTableReplica
-//     action in the destination Region, deleting the replica and all of its
-//     items in the destination Region.
-//
-// When you manually remove a table or global table replica, you do not automatically
-// remove any associated scalable targets, scaling policies, or CloudWatch alarms.
-type ReplicationGroupUpdate struct {
-	_ struct{} `type:"structure"`
-
-	// The parameters required for creating a replica for the table.
-	Create *CreateReplicationGroupMemberAction `type:"structure"`
-
-	// The parameters required for deleting a replica for the table.
-	Delete *DeleteReplicationGroupMemberAction `type:"structure"`
-
-	// The parameters required for updating a replica for the table.
-	Update *UpdateReplicationGroupMemberAction `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output.
The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicationGroupUpdate) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicationGroupUpdate) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ReplicationGroupUpdate) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReplicationGroupUpdate"} - if s.Create != nil { - if err := s.Create.Validate(); err != nil { - invalidParams.AddNested("Create", err.(request.ErrInvalidParams)) - } - } - if s.Delete != nil { - if err := s.Delete.Validate(); err != nil { - invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) - } - } - if s.Update != nil { - if err := s.Update.Validate(); err != nil { - invalidParams.AddNested("Update", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCreate sets the Create field's value. -func (s *ReplicationGroupUpdate) SetCreate(v *CreateReplicationGroupMemberAction) *ReplicationGroupUpdate { - s.Create = v - return s -} - -// SetDelete sets the Delete field's value. -func (s *ReplicationGroupUpdate) SetDelete(v *DeleteReplicationGroupMemberAction) *ReplicationGroupUpdate { - s.Delete = v - return s -} - -// SetUpdate sets the Update field's value. -func (s *ReplicationGroupUpdate) SetUpdate(v *UpdateReplicationGroupMemberAction) *ReplicationGroupUpdate { - s.Update = v - return s -} - -// Throughput exceeds the current throughput quota for your account. Please -// contact Amazon Web Services Support (https://aws.amazon.com/support) to request -// a quota increase. -type RequestLimitExceeded struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RequestLimitExceeded) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RequestLimitExceeded) GoString() string { - return s.String() -} - -func newErrorRequestLimitExceeded(v protocol.ResponseMetadata) error { - return &RequestLimitExceeded{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *RequestLimitExceeded) Code() string { - return "RequestLimitExceeded" -} - -// Message returns the exception's message. -func (s *RequestLimitExceeded) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. 
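`ReplicationGroupUpdate` is the newer (2019.11.21) replication mechanism and goes through plain `UpdateTable`. A sketch of adding one replica; each update element carries exactly one of `Create`, `Update`, or `Delete`, and per the doc text above, removing a replica does not clean up its scalable targets, scaling policies, or CloudWatch alarms (table and Region names are hypothetical):

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// One action per ReplicationGroupUpdate element: here, Create.
	_, err := svc.UpdateTable(&dynamodb.UpdateTableInput{
		TableName: aws.String("Music"), // hypothetical table
		ReplicaUpdates: []*dynamodb.ReplicationGroupUpdate{
			{Create: &dynamodb.CreateReplicationGroupMemberAction{
				RegionName: aws.String("eu-west-1"),
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```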
-func (s *RequestLimitExceeded) OrigErr() error { - return nil -} - -func (s *RequestLimitExceeded) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *RequestLimitExceeded) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *RequestLimitExceeded) RequestID() string { - return s.RespMetadata.RequestID -} - -// The operation conflicts with the resource's availability. For example, you -// attempted to recreate an existing table, or tried to delete a table currently -// in the CREATING state. -type ResourceInUseException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - // The resource which is being attempted to be changed is in use. - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ResourceInUseException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ResourceInUseException) GoString() string { - return s.String() -} - -func newErrorResourceInUseException(v protocol.ResponseMetadata) error { - return &ResourceInUseException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *ResourceInUseException) Code() string { - return "ResourceInUseException" -} - -// Message returns the exception's message. -func (s *ResourceInUseException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *ResourceInUseException) OrigErr() error { - return nil -} - -func (s *ResourceInUseException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *ResourceInUseException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *ResourceInUseException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - // The resource which is being requested does not exist. - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ResourceNotFoundException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ResourceNotFoundException) GoString() string { - return s.String() -} - -func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { - return &ResourceNotFoundException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *ResourceNotFoundException) Code() string { - return "ResourceNotFoundException" -} - -// Message returns the exception's message. -func (s *ResourceNotFoundException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *ResourceNotFoundException) OrigErr() error { - return nil -} - -func (s *ResourceNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *ResourceNotFoundException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *ResourceNotFoundException) RequestID() string { - return s.RespMetadata.RequestID -} - -// Contains details for the restore. -type RestoreSummary struct { - _ struct{} `type:"structure"` - - // Point in time or source backup time. - // - // RestoreDateTime is a required field - RestoreDateTime *time.Time `type:"timestamp" required:"true"` - - // Indicates if a restore is in progress or not. - // - // RestoreInProgress is a required field - RestoreInProgress *bool `type:"boolean" required:"true"` - - // The Amazon Resource Name (ARN) of the backup from which the table was restored. - SourceBackupArn *string `min:"37" type:"string"` - - // The ARN of the source table of the backup that is being restored. - SourceTableArn *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RestoreSummary) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RestoreSummary) GoString() string { - return s.String() -} - -// SetRestoreDateTime sets the RestoreDateTime field's value. -func (s *RestoreSummary) SetRestoreDateTime(v time.Time) *RestoreSummary { - s.RestoreDateTime = &v - return s -} - -// SetRestoreInProgress sets the RestoreInProgress field's value. -func (s *RestoreSummary) SetRestoreInProgress(v bool) *RestoreSummary { - s.RestoreInProgress = &v - return s -} - -// SetSourceBackupArn sets the SourceBackupArn field's value. -func (s *RestoreSummary) SetSourceBackupArn(v string) *RestoreSummary { - s.SourceBackupArn = &v - return s -} - -// SetSourceTableArn sets the SourceTableArn field's value. -func (s *RestoreSummary) SetSourceTableArn(v string) *RestoreSummary { - s.SourceTableArn = &v - return s -} - -type RestoreTableFromBackupInput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) associated with the backup. 
- // - // BackupArn is a required field - BackupArn *string `min:"37" type:"string" required:"true"` - - // The billing mode of the restored table. - BillingModeOverride *string `type:"string" enum:"BillingMode"` - - // List of global secondary indexes for the restored table. The indexes provided - // should match existing secondary indexes. You can choose to exclude some or - // all of the indexes at the time of restore. - GlobalSecondaryIndexOverride []*GlobalSecondaryIndex `type:"list"` - - // List of local secondary indexes for the restored table. The indexes provided - // should match existing secondary indexes. You can choose to exclude some or - // all of the indexes at the time of restore. - LocalSecondaryIndexOverride []*LocalSecondaryIndex `type:"list"` - - // Sets the maximum number of read and write units for the specified on-demand - // table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, - // or both. - OnDemandThroughputOverride *OnDemandThroughput `type:"structure"` - - // Provisioned throughput settings for the restored table. - ProvisionedThroughputOverride *ProvisionedThroughput `type:"structure"` - - // The new server-side encryption settings for the restored table. - SSESpecificationOverride *SSESpecification `type:"structure"` - - // The name of the new table to which the backup must be restored. - // - // TargetTableName is a required field - TargetTableName *string `min:"3" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RestoreTableFromBackupInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RestoreTableFromBackupInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
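The input above maps onto a straightforward call; a minimal sketch with a placeholder backup ARN and target table name, overriding only the billing mode:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// Restore the backup into a new table; the ARN and table name below
	// are placeholders, not real resources.
	out, err := svc.RestoreTableFromBackup(&dynamodb.RestoreTableFromBackupInput{
		BackupArn:           aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/example/backup/01234567890123-abcdefgh"),
		TargetTableName:     aws.String("example-restored"),
		BillingModeOverride: aws.String(dynamodb.BillingModePayPerRequest),
	})
	if err != nil {
		fmt.Println("restore failed:", err)
		return
	}
	fmt.Println("restoring:", aws.StringValue(out.TableDescription.TableName))
}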
-func (s *RestoreTableFromBackupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RestoreTableFromBackupInput"} - if s.BackupArn == nil { - invalidParams.Add(request.NewErrParamRequired("BackupArn")) - } - if s.BackupArn != nil && len(*s.BackupArn) < 37 { - invalidParams.Add(request.NewErrParamMinLen("BackupArn", 37)) - } - if s.TargetTableName == nil { - invalidParams.Add(request.NewErrParamRequired("TargetTableName")) - } - if s.TargetTableName != nil && len(*s.TargetTableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TargetTableName", 3)) - } - if s.GlobalSecondaryIndexOverride != nil { - for i, v := range s.GlobalSecondaryIndexOverride { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexOverride", i), err.(request.ErrInvalidParams)) - } - } - } - if s.LocalSecondaryIndexOverride != nil { - for i, v := range s.LocalSecondaryIndexOverride { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LocalSecondaryIndexOverride", i), err.(request.ErrInvalidParams)) - } - } - } - if s.ProvisionedThroughputOverride != nil { - if err := s.ProvisionedThroughputOverride.Validate(); err != nil { - invalidParams.AddNested("ProvisionedThroughputOverride", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBackupArn sets the BackupArn field's value. -func (s *RestoreTableFromBackupInput) SetBackupArn(v string) *RestoreTableFromBackupInput { - s.BackupArn = &v - return s -} - -// SetBillingModeOverride sets the BillingModeOverride field's value. -func (s *RestoreTableFromBackupInput) SetBillingModeOverride(v string) *RestoreTableFromBackupInput { - s.BillingModeOverride = &v - return s -} - -// SetGlobalSecondaryIndexOverride sets the GlobalSecondaryIndexOverride field's value. -func (s *RestoreTableFromBackupInput) SetGlobalSecondaryIndexOverride(v []*GlobalSecondaryIndex) *RestoreTableFromBackupInput { - s.GlobalSecondaryIndexOverride = v - return s -} - -// SetLocalSecondaryIndexOverride sets the LocalSecondaryIndexOverride field's value. -func (s *RestoreTableFromBackupInput) SetLocalSecondaryIndexOverride(v []*LocalSecondaryIndex) *RestoreTableFromBackupInput { - s.LocalSecondaryIndexOverride = v - return s -} - -// SetOnDemandThroughputOverride sets the OnDemandThroughputOverride field's value. -func (s *RestoreTableFromBackupInput) SetOnDemandThroughputOverride(v *OnDemandThroughput) *RestoreTableFromBackupInput { - s.OnDemandThroughputOverride = v - return s -} - -// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. -func (s *RestoreTableFromBackupInput) SetProvisionedThroughputOverride(v *ProvisionedThroughput) *RestoreTableFromBackupInput { - s.ProvisionedThroughputOverride = v - return s -} - -// SetSSESpecificationOverride sets the SSESpecificationOverride field's value. -func (s *RestoreTableFromBackupInput) SetSSESpecificationOverride(v *SSESpecification) *RestoreTableFromBackupInput { - s.SSESpecificationOverride = v - return s -} - -// SetTargetTableName sets the TargetTableName field's value. -func (s *RestoreTableFromBackupInput) SetTargetTableName(v string) *RestoreTableFromBackupInput { - s.TargetTableName = &v - return s -} - -type RestoreTableFromBackupOutput struct { - _ struct{} `type:"structure"` - - // The description of the table created from an existing backup. 
-	TableDescription *TableDescription `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RestoreTableFromBackupOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RestoreTableFromBackupOutput) GoString() string {
-	return s.String()
-}
-
-// SetTableDescription sets the TableDescription field's value.
-func (s *RestoreTableFromBackupOutput) SetTableDescription(v *TableDescription) *RestoreTableFromBackupOutput {
-	s.TableDescription = v
-	return s
-}
-
-type RestoreTableToPointInTimeInput struct {
-	_ struct{} `type:"structure"`
-
-	// The billing mode of the restored table.
-	BillingModeOverride *string `type:"string" enum:"BillingMode"`
-
-	// List of global secondary indexes for the restored table. The indexes provided
-	// should match existing secondary indexes. You can choose to exclude some or
-	// all of the indexes at the time of restore.
-	GlobalSecondaryIndexOverride []*GlobalSecondaryIndex `type:"list"`
-
-	// List of local secondary indexes for the restored table. The indexes provided
-	// should match existing secondary indexes. You can choose to exclude some or
-	// all of the indexes at the time of restore.
-	LocalSecondaryIndexOverride []*LocalSecondaryIndex `type:"list"`
-
-	// Sets the maximum number of read and write units for the specified on-demand
-	// table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits,
-	// or both.
-	OnDemandThroughputOverride *OnDemandThroughput `type:"structure"`
-
-	// Provisioned throughput settings for the restored table.
-	ProvisionedThroughputOverride *ProvisionedThroughput `type:"structure"`
-
-	// Time in the past to restore the table to.
-	RestoreDateTime *time.Time `type:"timestamp"`
-
-	// The new server-side encryption settings for the restored table.
-	SSESpecificationOverride *SSESpecification `type:"structure"`
-
-	// The DynamoDB table that will be restored. This value is an Amazon Resource
-	// Name (ARN).
-	SourceTableArn *string `min:"1" type:"string"`
-
-	// Name of the source table that is being restored.
-	SourceTableName *string `min:"3" type:"string"`
-
-	// The name of the new table to which the source table must be restored.
-	//
-	// TargetTableName is a required field
-	TargetTableName *string `min:"3" type:"string" required:"true"`
-
-	// Restore the table to the latest possible time. LatestRestorableDateTime is
-	// typically 5 minutes before the current time.
-	UseLatestRestorableTime *bool `type:"boolean"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output.
-// The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RestoreTableToPointInTimeInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *RestoreTableToPointInTimeInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "RestoreTableToPointInTimeInput"}
-	if s.SourceTableArn != nil && len(*s.SourceTableArn) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("SourceTableArn", 1))
-	}
-	if s.SourceTableName != nil && len(*s.SourceTableName) < 3 {
-		invalidParams.Add(request.NewErrParamMinLen("SourceTableName", 3))
-	}
-	if s.TargetTableName == nil {
-		invalidParams.Add(request.NewErrParamRequired("TargetTableName"))
-	}
-	if s.TargetTableName != nil && len(*s.TargetTableName) < 3 {
-		invalidParams.Add(request.NewErrParamMinLen("TargetTableName", 3))
-	}
-	if s.GlobalSecondaryIndexOverride != nil {
-		for i, v := range s.GlobalSecondaryIndexOverride {
-			if v == nil {
-				continue
-			}
-			if err := v.Validate(); err != nil {
-				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexOverride", i), err.(request.ErrInvalidParams))
-			}
-		}
-	}
-	if s.LocalSecondaryIndexOverride != nil {
-		for i, v := range s.LocalSecondaryIndexOverride {
-			if v == nil {
-				continue
-			}
-			if err := v.Validate(); err != nil {
-				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LocalSecondaryIndexOverride", i), err.(request.ErrInvalidParams))
-			}
-		}
-	}
-	if s.ProvisionedThroughputOverride != nil {
-		if err := s.ProvisionedThroughputOverride.Validate(); err != nil {
-			invalidParams.AddNested("ProvisionedThroughputOverride", err.(request.ErrInvalidParams))
-		}
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetBillingModeOverride sets the BillingModeOverride field's value.
-func (s *RestoreTableToPointInTimeInput) SetBillingModeOverride(v string) *RestoreTableToPointInTimeInput {
-	s.BillingModeOverride = &v
-	return s
-}
-
-// SetGlobalSecondaryIndexOverride sets the GlobalSecondaryIndexOverride field's value.
-func (s *RestoreTableToPointInTimeInput) SetGlobalSecondaryIndexOverride(v []*GlobalSecondaryIndex) *RestoreTableToPointInTimeInput {
-	s.GlobalSecondaryIndexOverride = v
-	return s
-}
-
-// SetLocalSecondaryIndexOverride sets the LocalSecondaryIndexOverride field's value.
-func (s *RestoreTableToPointInTimeInput) SetLocalSecondaryIndexOverride(v []*LocalSecondaryIndex) *RestoreTableToPointInTimeInput {
-	s.LocalSecondaryIndexOverride = v
-	return s
-}
-
-// SetOnDemandThroughputOverride sets the OnDemandThroughputOverride field's value.
-func (s *RestoreTableToPointInTimeInput) SetOnDemandThroughputOverride(v *OnDemandThroughput) *RestoreTableToPointInTimeInput {
-	s.OnDemandThroughputOverride = v
-	return s
-}
-
-// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value.
-func (s *RestoreTableToPointInTimeInput) SetProvisionedThroughputOverride(v *ProvisionedThroughput) *RestoreTableToPointInTimeInput {
-	s.ProvisionedThroughputOverride = v
-	return s
-}
-
-// SetRestoreDateTime sets the RestoreDateTime field's value.
-func (s *RestoreTableToPointInTimeInput) SetRestoreDateTime(v time.Time) *RestoreTableToPointInTimeInput {
-	s.RestoreDateTime = &v
-	return s
-}
-
-// SetSSESpecificationOverride sets the SSESpecificationOverride field's value.
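RestoreTableToPointInTimeInput is typically driven by UseLatestRestorableTime rather than an explicit RestoreDateTime; a minimal sketch with hypothetical table names:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// Restore to the latest restorable time (typically ~5 minutes behind
	// now); pass RestoreDateTime instead for a specific instant.
	out, err := svc.RestoreTableToPointInTime(&dynamodb.RestoreTableToPointInTimeInput{
		SourceTableName:         aws.String("example-table"),
		TargetTableName:         aws.String("example-table-pitr"),
		UseLatestRestorableTime: aws.Bool(true),
	})
	if err != nil {
		fmt.Println("PITR restore failed:", err)
		return
	}
	fmt.Println("status:", aws.StringValue(out.TableDescription.TableStatus))
}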
-func (s *RestoreTableToPointInTimeInput) SetSSESpecificationOverride(v *SSESpecification) *RestoreTableToPointInTimeInput { - s.SSESpecificationOverride = v - return s -} - -// SetSourceTableArn sets the SourceTableArn field's value. -func (s *RestoreTableToPointInTimeInput) SetSourceTableArn(v string) *RestoreTableToPointInTimeInput { - s.SourceTableArn = &v - return s -} - -// SetSourceTableName sets the SourceTableName field's value. -func (s *RestoreTableToPointInTimeInput) SetSourceTableName(v string) *RestoreTableToPointInTimeInput { - s.SourceTableName = &v - return s -} - -// SetTargetTableName sets the TargetTableName field's value. -func (s *RestoreTableToPointInTimeInput) SetTargetTableName(v string) *RestoreTableToPointInTimeInput { - s.TargetTableName = &v - return s -} - -// SetUseLatestRestorableTime sets the UseLatestRestorableTime field's value. -func (s *RestoreTableToPointInTimeInput) SetUseLatestRestorableTime(v bool) *RestoreTableToPointInTimeInput { - s.UseLatestRestorableTime = &v - return s -} - -type RestoreTableToPointInTimeOutput struct { - _ struct{} `type:"structure"` - - // Represents the properties of a table. - TableDescription *TableDescription `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RestoreTableToPointInTimeOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RestoreTableToPointInTimeOutput) GoString() string { - return s.String() -} - -// SetTableDescription sets the TableDescription field's value. -func (s *RestoreTableToPointInTimeOutput) SetTableDescription(v *TableDescription) *RestoreTableToPointInTimeOutput { - s.TableDescription = v - return s -} - -// The S3 bucket that is being imported from. -type S3BucketSource struct { - _ struct{} `type:"structure"` - - // The S3 bucket that is being imported from. - // - // S3Bucket is a required field - S3Bucket *string `type:"string" required:"true"` - - // The account number of the S3 bucket that is being imported from. If the bucket - // is owned by the requester this is optional. - S3BucketOwner *string `type:"string"` - - // The key prefix shared by all S3 Objects that are being imported. - S3KeyPrefix *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s S3BucketSource) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s S3BucketSource) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *S3BucketSource) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "S3BucketSource"} - if s.S3Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("S3Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetS3Bucket sets the S3Bucket field's value. -func (s *S3BucketSource) SetS3Bucket(v string) *S3BucketSource { - s.S3Bucket = &v - return s -} - -// SetS3BucketOwner sets the S3BucketOwner field's value. -func (s *S3BucketSource) SetS3BucketOwner(v string) *S3BucketSource { - s.S3BucketOwner = &v - return s -} - -// SetS3KeyPrefix sets the S3KeyPrefix field's value. -func (s *S3BucketSource) SetS3KeyPrefix(v string) *S3BucketSource { - s.S3KeyPrefix = &v - return s -} - -// The description of the server-side encryption status on the specified table. -type SSEDescription struct { - _ struct{} `type:"structure"` - - // Indicates the time, in UNIX epoch date format, when DynamoDB detected that - // the table's KMS key was inaccessible. This attribute will automatically be - // cleared when DynamoDB detects that the table's KMS key is accessible again. - // DynamoDB will initiate the table archival process when table's KMS key remains - // inaccessible for more than seven days from this date. - InaccessibleEncryptionDateTime *time.Time `type:"timestamp"` - - // The KMS key ARN used for the KMS encryption. - KMSMasterKeyArn *string `type:"string"` - - // Server-side encryption type. The only supported value is: - // - // * KMS - Server-side encryption that uses Key Management Service. The key - // is stored in your account and is managed by KMS (KMS charges apply). - SSEType *string `type:"string" enum:"SSEType"` - - // Represents the current state of server-side encryption. The only supported - // values are: - // - // * ENABLED - Server-side encryption is enabled. - // - // * UPDATING - Server-side encryption is being updated. - Status *string `type:"string" enum:"SSEStatus"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SSEDescription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SSEDescription) GoString() string { - return s.String() -} - -// SetInaccessibleEncryptionDateTime sets the InaccessibleEncryptionDateTime field's value. -func (s *SSEDescription) SetInaccessibleEncryptionDateTime(v time.Time) *SSEDescription { - s.InaccessibleEncryptionDateTime = &v - return s -} - -// SetKMSMasterKeyArn sets the KMSMasterKeyArn field's value. -func (s *SSEDescription) SetKMSMasterKeyArn(v string) *SSEDescription { - s.KMSMasterKeyArn = &v - return s -} - -// SetSSEType sets the SSEType field's value. -func (s *SSEDescription) SetSSEType(v string) *SSEDescription { - s.SSEType = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *SSEDescription) SetStatus(v string) *SSEDescription { - s.Status = &v - return s -} - -// Represents the settings used to enable server-side encryption. 
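All of the generated shapes follow the pattern seen here: chainable SetXxx helpers plus a client-side Validate that mirrors the min/required struct tags. A small sketch using S3BucketSource (bucket and prefix are placeholders); it is runnable without credentials since no request is sent:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	// The generated setters return the receiver, so shapes can be built fluently.
	src := (&dynamodb.S3BucketSource{}).
		SetS3Bucket("example-import-bucket"). // hypothetical bucket
		SetS3KeyPrefix("exports/2024/")

	// Validate runs the same client-side checks the request marshaler applies.
	if err := src.Validate(); err != nil {
		fmt.Println("invalid source:", err)
		return
	}
	fmt.Println("source ok:", src.String())
}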
-type SSESpecification struct { - _ struct{} `type:"structure"` - - // Indicates whether server-side encryption is done using an Amazon Web Services - // managed key or an Amazon Web Services owned key. If enabled (true), server-side - // encryption type is set to KMS and an Amazon Web Services managed key is used - // (KMS charges apply). If disabled (false) or not specified, server-side encryption - // is set to Amazon Web Services owned key. - Enabled *bool `type:"boolean"` - - // The KMS key that should be used for the KMS encryption. To specify a key, - // use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note - // that you should only provide this parameter if the key is different from - // the default DynamoDB key alias/aws/dynamodb. - KMSMasterKeyId *string `type:"string"` - - // Server-side encryption type. The only supported value is: - // - // * KMS - Server-side encryption that uses Key Management Service. The key - // is stored in your account and is managed by KMS (KMS charges apply). - SSEType *string `type:"string" enum:"SSEType"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SSESpecification) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SSESpecification) GoString() string { - return s.String() -} - -// SetEnabled sets the Enabled field's value. -func (s *SSESpecification) SetEnabled(v bool) *SSESpecification { - s.Enabled = &v - return s -} - -// SetKMSMasterKeyId sets the KMSMasterKeyId field's value. -func (s *SSESpecification) SetKMSMasterKeyId(v string) *SSESpecification { - s.KMSMasterKeyId = &v - return s -} - -// SetSSEType sets the SSEType field's value. -func (s *SSESpecification) SetSSEType(v string) *SSESpecification { - s.SSEType = &v - return s -} - -// Represents the input of a Scan operation. -type ScanInput struct { - _ struct{} `type:"structure"` - - // This is a legacy parameter. Use ProjectionExpression instead. For more information, - // see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html) - // in the Amazon DynamoDB Developer Guide. - AttributesToGet []*string `min:"1" type:"list"` - - // This is a legacy parameter. Use FilterExpression instead. For more information, - // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) - // in the Amazon DynamoDB Developer Guide. - ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` - - // A Boolean value that determines the read consistency model during the scan: - // - // * If ConsistentRead is false, then the data returned from Scan might not - // contain the results from other recently completed write operations (PutItem, - // UpdateItem, or DeleteItem). - // - // * If ConsistentRead is true, then all of the write operations that completed - // before the Scan began are guaranteed to be contained in the Scan response. - // - // The default setting for ConsistentRead is false. 
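SSESpecification, just above, is what callers hand to CreateTable or UpdateTable to move a table onto KMS; a minimal UpdateTable sketch with a placeholder key alias and table name:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// Switch the table to a customer managed key; omit KMSMasterKeyId to fall
	// back to the AWS managed key alias/aws/dynamodb.
	_, err := svc.UpdateTable(&dynamodb.UpdateTableInput{
		TableName: aws.String("example-table"),
		SSESpecification: &dynamodb.SSESpecification{
			Enabled:        aws.Bool(true),
			SSEType:        aws.String(dynamodb.SSETypeKms),
			KMSMasterKeyId: aws.String("alias/example-key"),
		},
	})
	if err != nil {
		fmt.Println("UpdateTable failed:", err)
	}
}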
- // - // The ConsistentRead parameter is not supported on global secondary indexes. - // If you scan a global secondary index with ConsistentRead set to true, you - // will receive a ValidationException. - ConsistentRead *bool `type:"boolean"` - - // The primary key of the first item that this operation will evaluate. Use - // the value that was returned for LastEvaluatedKey in the previous operation. - // - // The data type for ExclusiveStartKey must be String, Number or Binary. No - // set data types are allowed. - // - // In a parallel scan, a Scan request that includes ExclusiveStartKey must specify - // the same segment whose previous Scan returned the corresponding value of - // LastEvaluatedKey. - ExclusiveStartKey map[string]*AttributeValue `type:"map"` - - // One or more substitution tokens for attribute names in an expression. The - // following are some use cases for using ExpressionAttributeNames: - // - // * To access an attribute whose name conflicts with a DynamoDB reserved - // word. - // - // * To create a placeholder for repeating occurrences of an attribute name - // in an expression. - // - // * To prevent special characters in an attribute name from being misinterpreted - // in an expression. - // - // Use the # character in an expression to dereference an attribute name. For - // example, consider the following attribute name: - // - // * Percentile - // - // The name of this attribute conflicts with a reserved word, so it cannot be - // used directly in an expression. (For the complete list of reserved words, - // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) - // in the Amazon DynamoDB Developer Guide). To work around this, you could specify - // the following for ExpressionAttributeNames: - // - // * {"#P":"Percentile"} - // - // You could then use this substitution in an expression, as in this example: - // - // * #P = :val - // - // Tokens that begin with the : character are expression attribute values, which - // are placeholders for the actual value at runtime. - // - // For more information on expression attribute names, see Specifying Item Attributes - // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. - ExpressionAttributeNames map[string]*string `type:"map"` - - // One or more values that can be substituted in an expression. - // - // Use the : (colon) character in an expression to dereference an attribute - // value. For example, suppose that you wanted to check whether the value of - // the ProductStatus attribute was one of the following: - // - // Available | Backordered | Discontinued - // - // You would first need to specify ExpressionAttributeValues as follows: - // - // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} - // } - // - // You could then use these values in an expression, such as this: - // - // ProductStatus IN (:avail, :back, :disc) - // - // For more information on expression attribute values, see Condition Expressions - // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) - // in the Amazon DynamoDB Developer Guide. - ExpressionAttributeValues map[string]*AttributeValue `type:"map"` - - // A string that contains conditions that DynamoDB applies after the Scan operation, - // but before the data is returned to you. 
-	// Items that do not satisfy the FilterExpression
-	// criteria are not returned.
-	//
-	// A FilterExpression is applied after the items have already been read; the
-	// process of filtering does not consume any additional read capacity units.
-	//
-	// For more information, see Filter Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.FilterExpression)
-	// in the Amazon DynamoDB Developer Guide.
-	FilterExpression *string `type:"string"`
-
-	// The name of a secondary index to scan. This index can be any local secondary
-	// index or global secondary index. Note that if you use the IndexName parameter,
-	// you must also provide TableName.
-	IndexName *string `min:"3" type:"string"`
-
-	// The maximum number of items to evaluate (not necessarily the number of matching
-	// items). If DynamoDB processes the number of items up to the limit while processing
-	// the results, it stops the operation and returns the matching values up to
-	// that point, and a key in LastEvaluatedKey to apply in a subsequent operation,
-	// so that you can pick up where you left off. Also, if the processed dataset
-	// size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation
-	// and returns the matching values up to the limit, and a key in LastEvaluatedKey
-	// to apply in a subsequent operation to continue the operation. For more information,
-	// see Working with Queries (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html)
-	// in the Amazon DynamoDB Developer Guide.
-	Limit *int64 `min:"1" type:"integer"`
-
-	// A string that identifies one or more attributes to retrieve from the specified
-	// table or index. These attributes can include scalars, sets, or elements of
-	// a JSON document. The attributes in the expression must be separated by commas.
-	//
-	// If no attribute names are specified, then all attributes will be returned.
-	// If any of the requested attributes are not found, they will not appear in
-	// the result.
-	//
-	// For more information, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
-	// in the Amazon DynamoDB Developer Guide.
-	ProjectionExpression *string `type:"string"`
-
-	// Determines the level of detail about either provisioned or on-demand throughput
-	// consumption that is returned in the response:
-	//
-	// * INDEXES - The response includes the aggregate ConsumedCapacity for the
-	// operation, together with ConsumedCapacity for each table and secondary
-	// index that was accessed. Note that some operations, such as GetItem and
-	// BatchGetItem, do not access any indexes at all. In these cases, specifying
-	// INDEXES will only return ConsumedCapacity information for table(s).
-	//
-	// * TOTAL - The response includes only the aggregate ConsumedCapacity for
-	// the operation.
-	//
-	// * NONE - No ConsumedCapacity details are included in the response.
-	ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
-
-	// This is a legacy parameter. Use FilterExpression instead. For more information,
-	// see ScanFilter (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ScanFilter.html)
-	// in the Amazon DynamoDB Developer Guide.
-	ScanFilter map[string]*Condition `type:"map"`
-
-	// For a parallel Scan request, Segment identifies an individual segment to
-	// be scanned by an application worker.
- // - // Segment IDs are zero-based, so the first segment is always 0. For example, - // if you want to use four application threads to scan a table or an index, - // then the first thread specifies a Segment value of 0, the second thread specifies - // 1, and so on. - // - // The value of LastEvaluatedKey returned from a parallel Scan request must - // be used as ExclusiveStartKey with the same segment ID in a subsequent Scan - // operation. - // - // The value for Segment must be greater than or equal to 0, and less than the - // value provided for TotalSegments. - // - // If you provide Segment, you must also provide TotalSegments. - Segment *int64 `type:"integer"` - - // The attributes to be returned in the result. You can retrieve all item attributes, - // specific item attributes, the count of matching items, or in the case of - // an index, some or all of the attributes projected into the index. - // - // * ALL_ATTRIBUTES - Returns all of the item attributes from the specified - // table or index. If you query a local secondary index, then for each matching - // item in the index, DynamoDB fetches the entire item from the parent table. - // If the index is configured to project all item attributes, then all of - // the data can be obtained from the local secondary index, and no fetching - // is required. - // - // * ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves - // all attributes that have been projected into the index. If the index is - // configured to project all attributes, this return value is equivalent - // to specifying ALL_ATTRIBUTES. - // - // * COUNT - Returns the number of matching items, rather than the matching - // items themselves. Note that this uses the same quantity of read capacity - // units as getting the items, and is subject to the same item size calculations. - // - // * SPECIFIC_ATTRIBUTES - Returns only the attributes listed in ProjectionExpression. - // This return value is equivalent to specifying ProjectionExpression without - // specifying any value for Select. If you query or scan a local secondary - // index and request only attributes that are projected into that index, - // the operation reads only the index and not the table. If any of the requested - // attributes are not projected into the local secondary index, DynamoDB - // fetches each of these attributes from the parent table. This extra fetching - // incurs additional throughput cost and latency. If you query or scan a - // global secondary index, you can only request attributes that are projected - // into the index. Global secondary index queries cannot fetch attributes - // from the parent table. - // - // If neither Select nor ProjectionExpression are specified, DynamoDB defaults - // to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when - // accessing an index. You cannot use both Select and ProjectionExpression together - // in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. - // (This usage is equivalent to specifying ProjectionExpression without any - // value for Select.) - // - // If you use the ProjectionExpression parameter, then the value for Select - // can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an - // error. - Select *string `type:"string" enum:"Select"` - - // The name of the table containing the requested items or if you provide IndexName, - // the name of the table to which that index belongs. 
- // - // You can also provide the Amazon Resource Name (ARN) of the table in this - // parameter. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` - - // For a parallel Scan request, TotalSegments represents the total number of - // segments into which the Scan operation will be divided. The value of TotalSegments - // corresponds to the number of application workers that will perform the parallel - // scan. For example, if you want to use four application threads to scan a - // table or an index, specify a TotalSegments value of 4. - // - // The value for TotalSegments must be greater than or equal to 1, and less - // than or equal to 1000000. If you specify a TotalSegments value of 1, the - // Scan operation will be sequential rather than parallel. - // - // If you specify TotalSegments, you must also specify Segment. - TotalSegments *int64 `min:"1" type:"integer"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ScanInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ScanInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ScanInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ScanInput"} - if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AttributesToGet", 1)) - } - if s.IndexName != nil && len(*s.IndexName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) - } - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - if s.TotalSegments != nil && *s.TotalSegments < 1 { - invalidParams.Add(request.NewErrParamMinValue("TotalSegments", 1)) - } - if s.ScanFilter != nil { - for i, v := range s.ScanFilter { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ScanFilter", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAttributesToGet sets the AttributesToGet field's value. -func (s *ScanInput) SetAttributesToGet(v []*string) *ScanInput { - s.AttributesToGet = v - return s -} - -// SetConditionalOperator sets the ConditionalOperator field's value. -func (s *ScanInput) SetConditionalOperator(v string) *ScanInput { - s.ConditionalOperator = &v - return s -} - -// SetConsistentRead sets the ConsistentRead field's value. -func (s *ScanInput) SetConsistentRead(v bool) *ScanInput { - s.ConsistentRead = &v - return s -} - -// SetExclusiveStartKey sets the ExclusiveStartKey field's value. 
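The expression-related ScanInput fields documented above combine as follows; a minimal sketch where the table name is hypothetical and #S works around the reserved word Status:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// #S substitutes the reserved word "Status"; :avail is bound at runtime.
	out, err := svc.Scan(&dynamodb.ScanInput{
		TableName:        aws.String("Products"), // hypothetical table
		FilterExpression: aws.String("#S = :avail"),
		ExpressionAttributeNames: map[string]*string{
			"#S": aws.String("Status"),
		},
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
			":avail": {S: aws.String("Available")},
		},
		ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal),
	})
	if err != nil {
		fmt.Println("scan failed:", err)
		return
	}
	// Count reflects items after the filter; ScannedCount before it.
	fmt.Printf("matched %d of %d scanned\n",
		aws.Int64Value(out.Count), aws.Int64Value(out.ScannedCount))
}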
-func (s *ScanInput) SetExclusiveStartKey(v map[string]*AttributeValue) *ScanInput { - s.ExclusiveStartKey = v - return s -} - -// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. -func (s *ScanInput) SetExpressionAttributeNames(v map[string]*string) *ScanInput { - s.ExpressionAttributeNames = v - return s -} - -// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. -func (s *ScanInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *ScanInput { - s.ExpressionAttributeValues = v - return s -} - -// SetFilterExpression sets the FilterExpression field's value. -func (s *ScanInput) SetFilterExpression(v string) *ScanInput { - s.FilterExpression = &v - return s -} - -// SetIndexName sets the IndexName field's value. -func (s *ScanInput) SetIndexName(v string) *ScanInput { - s.IndexName = &v - return s -} - -// SetLimit sets the Limit field's value. -func (s *ScanInput) SetLimit(v int64) *ScanInput { - s.Limit = &v - return s -} - -// SetProjectionExpression sets the ProjectionExpression field's value. -func (s *ScanInput) SetProjectionExpression(v string) *ScanInput { - s.ProjectionExpression = &v - return s -} - -// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. -func (s *ScanInput) SetReturnConsumedCapacity(v string) *ScanInput { - s.ReturnConsumedCapacity = &v - return s -} - -// SetScanFilter sets the ScanFilter field's value. -func (s *ScanInput) SetScanFilter(v map[string]*Condition) *ScanInput { - s.ScanFilter = v - return s -} - -// SetSegment sets the Segment field's value. -func (s *ScanInput) SetSegment(v int64) *ScanInput { - s.Segment = &v - return s -} - -// SetSelect sets the Select field's value. -func (s *ScanInput) SetSelect(v string) *ScanInput { - s.Select = &v - return s -} - -// SetTableName sets the TableName field's value. -func (s *ScanInput) SetTableName(v string) *ScanInput { - s.TableName = &v - return s -} - -// SetTotalSegments sets the TotalSegments field's value. -func (s *ScanInput) SetTotalSegments(v int64) *ScanInput { - s.TotalSegments = &v - return s -} - -// Represents the output of a Scan operation. -type ScanOutput struct { - _ struct{} `type:"structure"` - - // The capacity units consumed by the Scan operation. The data returned includes - // the total provisioned throughput consumed, along with statistics for the - // table and any indexes involved in the operation. ConsumedCapacity is only - // returned if the ReturnConsumedCapacity parameter was specified. For more - // information, see Capacity unit consumption for read operations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#read-operation-consumption) - // in the Amazon DynamoDB Developer Guide. - ConsumedCapacity *ConsumedCapacity `type:"structure"` - - // The number of items in the response. - // - // If you set ScanFilter in the request, then Count is the number of items returned - // after the filter was applied, and ScannedCount is the number of matching - // items before the filter was applied. - // - // If you did not use a filter in the request, then Count is the same as ScannedCount. - Count *int64 `type:"integer"` - - // An array of item attributes that match the scan criteria. Each element in - // this array consists of an attribute name and the value for that attribute. - Items []map[string]*AttributeValue `type:"list"` - - // The primary key of the item where the operation stopped, inclusive of the - // previous result set. 
-	// Use this value to start a new operation, excluding this
-	// value in the new request.
-	//
-	// If LastEvaluatedKey is empty, then the "last page" of results has been processed
-	// and there is no more data to be retrieved.
-	//
-	// If LastEvaluatedKey is not empty, it does not necessarily mean that there
-	// is more data in the result set. The only way to know when you have reached
-	// the end of the result set is when LastEvaluatedKey is empty.
-	LastEvaluatedKey map[string]*AttributeValue `type:"map"`
-
-	// The number of items evaluated, before any ScanFilter is applied. A high ScannedCount
-	// value with few, or no, Count results indicates an inefficient Scan operation.
-	// For more information, see Count and ScannedCount (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Count)
-	// in the Amazon DynamoDB Developer Guide.
-	//
-	// If you did not use a filter in the request, then ScannedCount is the same
-	// as Count.
-	ScannedCount *int64 `type:"integer"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ScanOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ScanOutput) GoString() string {
-	return s.String()
-}
-
-// SetConsumedCapacity sets the ConsumedCapacity field's value.
-func (s *ScanOutput) SetConsumedCapacity(v *ConsumedCapacity) *ScanOutput {
-	s.ConsumedCapacity = v
-	return s
-}
-
-// SetCount sets the Count field's value.
-func (s *ScanOutput) SetCount(v int64) *ScanOutput {
-	s.Count = &v
-	return s
-}
-
-// SetItems sets the Items field's value.
-func (s *ScanOutput) SetItems(v []map[string]*AttributeValue) *ScanOutput {
-	s.Items = v
-	return s
-}
-
-// SetLastEvaluatedKey sets the LastEvaluatedKey field's value.
-func (s *ScanOutput) SetLastEvaluatedKey(v map[string]*AttributeValue) *ScanOutput {
-	s.LastEvaluatedKey = v
-	return s
-}
-
-// SetScannedCount sets the ScannedCount field's value.
-func (s *ScanOutput) SetScannedCount(v int64) *ScanOutput {
-	s.ScannedCount = &v
-	return s
-}
-
-// Contains the details of the table when the backup was created.
-type SourceTableDetails struct {
-	_ struct{} `type:"structure"`
-
-	// Controls how you are charged for read and write throughput and how you manage
-	// capacity. This setting can be changed later.
-	//
-	// * PROVISIONED - Sets the read/write capacity mode to PROVISIONED. We recommend
-	// using PROVISIONED for predictable workloads.
-	//
-	// * PAY_PER_REQUEST - Sets the read/write capacity mode to PAY_PER_REQUEST.
-	// We recommend using PAY_PER_REQUEST for unpredictable workloads.
-	BillingMode *string `type:"string" enum:"BillingMode"`
-
-	// Number of items in the table. Note that this is an approximate value.
-	ItemCount *int64 `type:"long"`
-
-	// Schema of the table.
-	//
-	// KeySchema is a required field
-	KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"`
-
-	// Sets the maximum number of read and write units for the specified on-demand
-	// table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits,
-	// or both.
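ScanOutput's LastEvaluatedKey drives pagination; the v1 SDK's ScanPages helper feeds it back as ExclusiveStartKey automatically, and the Segment/TotalSegments fields of ScanInput slot in for a parallel scan. A sketch of one worker (table name hypothetical):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// One worker of a four-way parallel scan; ScanPages keeps requesting
	// pages until LastEvaluatedKey comes back empty.
	total := 0
	err := svc.ScanPages(&dynamodb.ScanInput{
		TableName:     aws.String("Products"), // hypothetical table
		Segment:       aws.Int64(0),
		TotalSegments: aws.Int64(4),
	}, func(page *dynamodb.ScanOutput, lastPage bool) bool {
		total += len(page.Items)
		return true // keep paging
	})
	if err != nil {
		fmt.Println("scan failed:", err)
		return
	}
	fmt.Println("segment 0 items:", total)
}

Whether segments run in goroutines or separate processes is up to the caller; the API only requires that each worker keep its Segment value stable across pages.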
- OnDemandThroughput *OnDemandThroughput `type:"structure"` - - // Read IOPs and Write IOPS on the table when the backup was created. - // - // ProvisionedThroughput is a required field - ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"` - - // ARN of the table for which backup was created. - TableArn *string `min:"1" type:"string"` - - // Time when the source table was created. - // - // TableCreationDateTime is a required field - TableCreationDateTime *time.Time `type:"timestamp" required:"true"` - - // Unique identifier for the table for which the backup was created. - // - // TableId is a required field - TableId *string `type:"string" required:"true"` - - // The name of the table for which the backup was created. - // - // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` - - // Size of the table in bytes. Note that this is an approximate value. - TableSizeBytes *int64 `type:"long"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SourceTableDetails) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SourceTableDetails) GoString() string { - return s.String() -} - -// SetBillingMode sets the BillingMode field's value. -func (s *SourceTableDetails) SetBillingMode(v string) *SourceTableDetails { - s.BillingMode = &v - return s -} - -// SetItemCount sets the ItemCount field's value. -func (s *SourceTableDetails) SetItemCount(v int64) *SourceTableDetails { - s.ItemCount = &v - return s -} - -// SetKeySchema sets the KeySchema field's value. -func (s *SourceTableDetails) SetKeySchema(v []*KeySchemaElement) *SourceTableDetails { - s.KeySchema = v - return s -} - -// SetOnDemandThroughput sets the OnDemandThroughput field's value. -func (s *SourceTableDetails) SetOnDemandThroughput(v *OnDemandThroughput) *SourceTableDetails { - s.OnDemandThroughput = v - return s -} - -// SetProvisionedThroughput sets the ProvisionedThroughput field's value. -func (s *SourceTableDetails) SetProvisionedThroughput(v *ProvisionedThroughput) *SourceTableDetails { - s.ProvisionedThroughput = v - return s -} - -// SetTableArn sets the TableArn field's value. -func (s *SourceTableDetails) SetTableArn(v string) *SourceTableDetails { - s.TableArn = &v - return s -} - -// SetTableCreationDateTime sets the TableCreationDateTime field's value. -func (s *SourceTableDetails) SetTableCreationDateTime(v time.Time) *SourceTableDetails { - s.TableCreationDateTime = &v - return s -} - -// SetTableId sets the TableId field's value. -func (s *SourceTableDetails) SetTableId(v string) *SourceTableDetails { - s.TableId = &v - return s -} - -// SetTableName sets the TableName field's value. -func (s *SourceTableDetails) SetTableName(v string) *SourceTableDetails { - s.TableName = &v - return s -} - -// SetTableSizeBytes sets the TableSizeBytes field's value. -func (s *SourceTableDetails) SetTableSizeBytes(v int64) *SourceTableDetails { - s.TableSizeBytes = &v - return s -} - -// Contains the details of the features enabled on the table when the backup -// was created. 
-// For example, LSIs, GSIs, streams, TTL.
-type SourceTableFeatureDetails struct {
-	_ struct{} `type:"structure"`
-
-	// Represents the GSI properties for the table when the backup was created.
-	// It includes the IndexName, KeySchema, Projection, and ProvisionedThroughput
-	// for the GSIs on the table at the time of backup.
-	GlobalSecondaryIndexes []*GlobalSecondaryIndexInfo `type:"list"`
-
-	// Represents the LSI properties for the table when the backup was created.
-	// It includes the IndexName, KeySchema and Projection for the LSIs on the table
-	// at the time of backup.
-	LocalSecondaryIndexes []*LocalSecondaryIndexInfo `type:"list"`
-
-	// The description of the server-side encryption status on the table when the
-	// backup was created.
-	SSEDescription *SSEDescription `type:"structure"`
-
-	// Stream settings on the table when the backup was created.
-	StreamDescription *StreamSpecification `type:"structure"`
-
-	// Time to Live settings on the table when the backup was created.
-	TimeToLiveDescription *TimeToLiveDescription `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s SourceTableFeatureDetails) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s SourceTableFeatureDetails) GoString() string {
-	return s.String()
-}
-
-// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value.
-func (s *SourceTableFeatureDetails) SetGlobalSecondaryIndexes(v []*GlobalSecondaryIndexInfo) *SourceTableFeatureDetails {
-	s.GlobalSecondaryIndexes = v
-	return s
-}
-
-// SetLocalSecondaryIndexes sets the LocalSecondaryIndexes field's value.
-func (s *SourceTableFeatureDetails) SetLocalSecondaryIndexes(v []*LocalSecondaryIndexInfo) *SourceTableFeatureDetails {
-	s.LocalSecondaryIndexes = v
-	return s
-}
-
-// SetSSEDescription sets the SSEDescription field's value.
-func (s *SourceTableFeatureDetails) SetSSEDescription(v *SSEDescription) *SourceTableFeatureDetails {
-	s.SSEDescription = v
-	return s
-}
-
-// SetStreamDescription sets the StreamDescription field's value.
-func (s *SourceTableFeatureDetails) SetStreamDescription(v *StreamSpecification) *SourceTableFeatureDetails {
-	s.StreamDescription = v
-	return s
-}
-
-// SetTimeToLiveDescription sets the TimeToLiveDescription field's value.
-func (s *SourceTableFeatureDetails) SetTimeToLiveDescription(v *TimeToLiveDescription) *SourceTableFeatureDetails {
-	s.TimeToLiveDescription = v
-	return s
-}
-
-// Represents the DynamoDB Streams configuration for a table in DynamoDB.
-type StreamSpecification struct {
-	_ struct{} `type:"structure"`
-
-	// Indicates whether DynamoDB Streams is enabled (true) or disabled (false)
-	// on the table.
-	//
-	// StreamEnabled is a required field
-	StreamEnabled *bool `type:"boolean" required:"true"`
-
-	// When an item in the table is modified, StreamViewType determines what information
-	// is written to the stream for this table. Valid values for StreamViewType
-	// are:
-	//
-	// * KEYS_ONLY - Only the key attributes of the modified item are written
-	// to the stream.
- // - // * NEW_IMAGE - The entire item, as it appears after it was modified, is - // written to the stream. - // - // * OLD_IMAGE - The entire item, as it appeared before it was modified, - // is written to the stream. - // - // * NEW_AND_OLD_IMAGES - Both the new and the old item images of the item - // are written to the stream. - StreamViewType *string `type:"string" enum:"StreamViewType"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StreamSpecification) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StreamSpecification) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *StreamSpecification) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StreamSpecification"} - if s.StreamEnabled == nil { - invalidParams.Add(request.NewErrParamRequired("StreamEnabled")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetStreamEnabled sets the StreamEnabled field's value. -func (s *StreamSpecification) SetStreamEnabled(v bool) *StreamSpecification { - s.StreamEnabled = &v - return s -} - -// SetStreamViewType sets the StreamViewType field's value. -func (s *StreamSpecification) SetStreamViewType(v string) *StreamSpecification { - s.StreamViewType = &v - return s -} - -// A target table with the specified name already exists. -type TableAlreadyExistsException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TableAlreadyExistsException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TableAlreadyExistsException) GoString() string { - return s.String() -} - -func newErrorTableAlreadyExistsException(v protocol.ResponseMetadata) error { - return &TableAlreadyExistsException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *TableAlreadyExistsException) Code() string { - return "TableAlreadyExistsException" -} - -// Message returns the exception's message. -func (s *TableAlreadyExistsException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *TableAlreadyExistsException) OrigErr() error { - return nil -} - -func (s *TableAlreadyExistsException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. 
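For reference, a minimal sketch of how the v1 `StreamSpecification` deleted above is typically built with its generated setters; the chosen view type is only an illustrative assumption:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	// Enable a stream carrying both the new and the old item images.
	spec := (&dynamodb.StreamSpecification{}).
		SetStreamEnabled(true).
		SetStreamViewType(dynamodb.StreamViewTypeNewAndOldImages)

	// Validate only enforces the required StreamEnabled field; it passes here.
	if err := spec.Validate(); err != nil {
		fmt.Println("invalid StreamSpecification:", err)
	}
}
```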
-func (s *TableAlreadyExistsException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *TableAlreadyExistsException) RequestID() string { - return s.RespMetadata.RequestID -} - -// Represents the auto scaling configuration for a global table. -type TableAutoScalingDescription struct { - _ struct{} `type:"structure"` - - // Represents replicas of the global table. - Replicas []*ReplicaAutoScalingDescription `type:"list"` - - // The name of the table. - TableName *string `min:"3" type:"string"` - - // The current state of the table: - // - // * CREATING - The table is being created. - // - // * UPDATING - The table is being updated. - // - // * DELETING - The table is being deleted. - // - // * ACTIVE - The table is ready for use. - TableStatus *string `type:"string" enum:"TableStatus"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TableAutoScalingDescription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TableAutoScalingDescription) GoString() string { - return s.String() -} - -// SetReplicas sets the Replicas field's value. -func (s *TableAutoScalingDescription) SetReplicas(v []*ReplicaAutoScalingDescription) *TableAutoScalingDescription { - s.Replicas = v - return s -} - -// SetTableName sets the TableName field's value. -func (s *TableAutoScalingDescription) SetTableName(v string) *TableAutoScalingDescription { - s.TableName = &v - return s -} - -// SetTableStatus sets the TableStatus field's value. -func (s *TableAutoScalingDescription) SetTableStatus(v string) *TableAutoScalingDescription { - s.TableStatus = &v - return s -} - -// Contains details of the table class. -type TableClassSummary struct { - _ struct{} `type:"structure"` - - // The date and time at which the table class was last updated. - LastUpdateDateTime *time.Time `type:"timestamp"` - - // The table class of the specified table. Valid values are STANDARD and STANDARD_INFREQUENT_ACCESS. - TableClass *string `type:"string" enum:"TableClass"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TableClassSummary) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TableClassSummary) GoString() string { - return s.String() -} - -// SetLastUpdateDateTime sets the LastUpdateDateTime field's value. -func (s *TableClassSummary) SetLastUpdateDateTime(v time.Time) *TableClassSummary { - s.LastUpdateDateTime = &v - return s -} - -// SetTableClass sets the TableClass field's value. 
-func (s *TableClassSummary) SetTableClass(v string) *TableClassSummary {
-	s.TableClass = &v
-	return s
-}
-
-// The parameters for the table created as part of the import operation.
-type TableCreationParameters struct {
-	_ struct{} `type:"structure"`
-
-	// The attributes of the table created as part of the import operation.
-	//
-	// AttributeDefinitions is a required field
-	AttributeDefinitions []*AttributeDefinition `type:"list" required:"true"`
-
-	// The billing mode for provisioning the table created as part of the import
-	// operation.
-	BillingMode *string `type:"string" enum:"BillingMode"`
-
-	// The Global Secondary Indexes (GSI) of the table to be created as part of
-	// the import operation.
-	GlobalSecondaryIndexes []*GlobalSecondaryIndex `type:"list"`
-
-	// The primary key and optional sort key of the table created as part of the
-	// import operation.
-	//
-	// KeySchema is a required field
-	KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"`
-
-	// Sets the maximum number of read and write units for the specified on-demand
-	// table. If you use this parameter, you must specify MaxReadRequestUnits,
-	// MaxWriteRequestUnits, or both.
-	OnDemandThroughput *OnDemandThroughput `type:"structure"`
-
-	// Represents the provisioned throughput settings for a specified table or
-	// index. The settings can be modified using the UpdateTable operation.
-	//
-	// For current minimum and maximum provisioned throughput values, see Service,
-	// Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
-	// in the Amazon DynamoDB Developer Guide.
-	ProvisionedThroughput *ProvisionedThroughput `type:"structure"`
-
-	// Represents the settings used to enable server-side encryption.
-	SSESpecification *SSESpecification `type:"structure"`
-
-	// The name of the table created as part of the import operation.
-	//
-	// TableName is a required field
-	TableName *string `min:"3" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TableCreationParameters) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TableCreationParameters) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *TableCreationParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TableCreationParameters"} - if s.AttributeDefinitions == nil { - invalidParams.Add(request.NewErrParamRequired("AttributeDefinitions")) - } - if s.KeySchema == nil { - invalidParams.Add(request.NewErrParamRequired("KeySchema")) - } - if s.KeySchema != nil && len(s.KeySchema) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1)) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) - } - if s.AttributeDefinitions != nil { - for i, v := range s.AttributeDefinitions { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AttributeDefinitions", i), err.(request.ErrInvalidParams)) - } - } - } - if s.GlobalSecondaryIndexes != nil { - for i, v := range s.GlobalSecondaryIndexes { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexes", i), err.(request.ErrInvalidParams)) - } - } - } - if s.KeySchema != nil { - for i, v := range s.KeySchema { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams)) - } - } - } - if s.ProvisionedThroughput != nil { - if err := s.ProvisionedThroughput.Validate(); err != nil { - invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAttributeDefinitions sets the AttributeDefinitions field's value. -func (s *TableCreationParameters) SetAttributeDefinitions(v []*AttributeDefinition) *TableCreationParameters { - s.AttributeDefinitions = v - return s -} - -// SetBillingMode sets the BillingMode field's value. -func (s *TableCreationParameters) SetBillingMode(v string) *TableCreationParameters { - s.BillingMode = &v - return s -} - -// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. -func (s *TableCreationParameters) SetGlobalSecondaryIndexes(v []*GlobalSecondaryIndex) *TableCreationParameters { - s.GlobalSecondaryIndexes = v - return s -} - -// SetKeySchema sets the KeySchema field's value. -func (s *TableCreationParameters) SetKeySchema(v []*KeySchemaElement) *TableCreationParameters { - s.KeySchema = v - return s -} - -// SetOnDemandThroughput sets the OnDemandThroughput field's value. -func (s *TableCreationParameters) SetOnDemandThroughput(v *OnDemandThroughput) *TableCreationParameters { - s.OnDemandThroughput = v - return s -} - -// SetProvisionedThroughput sets the ProvisionedThroughput field's value. -func (s *TableCreationParameters) SetProvisionedThroughput(v *ProvisionedThroughput) *TableCreationParameters { - s.ProvisionedThroughput = v - return s -} - -// SetSSESpecification sets the SSESpecification field's value. -func (s *TableCreationParameters) SetSSESpecification(v *SSESpecification) *TableCreationParameters { - s.SSESpecification = v - return s -} - -// SetTableName sets the TableName field's value. -func (s *TableCreationParameters) SetTableName(v string) *TableCreationParameters { - s.TableName = &v - return s -} - -// Represents the properties of a table. -type TableDescription struct { - _ struct{} `type:"structure"` - - // Contains information about the table archive. 
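The Validate method above mirrors the service-side constraints (required fields, a TableName of at least 3 characters, a non-empty KeySchema). A rough sketch of assembling TableCreationParameters for an on-demand import; the table and attribute names are invented for illustration:

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// buildImportParams sketches minimal TableCreationParameters for an
// on-demand table with a single string partition key.
func buildImportParams() (*dynamodb.TableCreationParameters, error) {
	p := &dynamodb.TableCreationParameters{
		TableName:   aws.String("ImportedOrders"), // hypothetical name
		BillingMode: aws.String(dynamodb.BillingModePayPerRequest),
		AttributeDefinitions: []*dynamodb.AttributeDefinition{{
			AttributeName: aws.String("OrderId"),
			AttributeType: aws.String(dynamodb.ScalarAttributeTypeS),
		}},
		KeySchema: []*dynamodb.KeySchemaElement{{
			AttributeName: aws.String("OrderId"),
			KeyType:       aws.String(dynamodb.KeyTypeHash),
		}},
	}
	// Client-side check before the ImportTable call is ever made.
	if err := p.Validate(); err != nil {
		return nil, err
	}
	return p, nil
}
```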
-	ArchivalSummary *ArchivalSummary `type:"structure"`
-
-	// An array of AttributeDefinition objects. Each of these objects describes
-	// one attribute in the table and index key schema.
-	//
-	// Each AttributeDefinition object in this array is composed of:
-	//
-	//    * AttributeName - The name of the attribute.
-	//
-	//    * AttributeType - The data type for the attribute.
-	AttributeDefinitions []*AttributeDefinition `type:"list"`
-
-	// Contains the details for the read/write capacity mode.
-	BillingModeSummary *BillingModeSummary `type:"structure"`
-
-	// The date and time when the table was created, in UNIX epoch time (http://www.epochconverter.com/)
-	// format.
-	CreationDateTime *time.Time `type:"timestamp"`
-
-	// Indicates whether deletion protection is enabled (true) or disabled (false)
-	// on the table.
-	DeletionProtectionEnabled *bool `type:"boolean"`
-
-	// The global secondary indexes, if any, on the table. Each index is scoped
-	// to a given partition key value. Each element is composed of:
-	//
-	//    * Backfilling - If true, then the index is currently in the backfilling
-	//    phase. Backfilling occurs only when a new global secondary index is added
-	//    to the table. It is the process by which DynamoDB populates the new index
-	//    with data from the table. You can delete an index that is being created
-	//    during the Backfilling phase when IndexStatus is set to CREATING and
-	//    Backfilling is true. You can't delete the index that is being created
-	//    when IndexStatus is set to CREATING and Backfilling is false. (This
-	//    attribute does not appear for indexes that were created during a CreateTable
-	//    operation.)
-	//
-	//    * IndexName - The name of the global secondary index.
-	//
-	//    * IndexSizeBytes - The total size of the global secondary index, in bytes.
-	//    DynamoDB updates this value approximately every six hours. Recent changes
-	//    might not be reflected in this value.
-	//
-	//    * IndexStatus - The current status of the global secondary index: CREATING
-	//    - The index is being created. UPDATING - The index is being updated. DELETING
-	//    - The index is being deleted. ACTIVE - The index is ready for use.
-	//
-	//    * ItemCount - The number of items in the global secondary index. DynamoDB
-	//    updates this value approximately every six hours. Recent changes might
-	//    not be reflected in this value.
-	//
-	//    * KeySchema - Specifies the complete index key schema. The attribute names
-	//    in the key schema must be between 1 and 255 characters (inclusive). The
-	//    key schema must begin with the same partition key as the table.
-	//
-	//    * Projection - Specifies attributes that are copied (projected) from the
-	//    table into the index. These are in addition to the primary key attributes
-	//    and index key attributes, which are automatically projected. Each attribute
-	//    specification is composed of: ProjectionType - One of the following: KEYS_ONLY
-	//    - Only the index and primary keys are projected into the index. INCLUDE
-	//    - In addition to the attributes described in KEYS_ONLY, the secondary
-	//    index will include other non-key attributes that you specify. ALL - All
-	//    of the table attributes are projected into the index. NonKeyAttributes
-	//    - A list of one or more non-key attribute names that are projected into
-	//    the secondary index.
The total count of attributes provided in NonKeyAttributes, - // summed across all of the secondary indexes, must not exceed 100. If you - // project the same attribute into two different indexes, this counts as - // two distinct attributes when determining the total. - // - // * ProvisionedThroughput - The provisioned throughput settings for the - // global secondary index, consisting of read and write capacity units, along - // with data about increases and decreases. - // - // If the table is in the DELETING state, no information about indexes will - // be returned. - GlobalSecondaryIndexes []*GlobalSecondaryIndexDescription `type:"list"` - - // Represents the version of global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html) - // in use, if the table is replicated across Amazon Web Services Regions. - GlobalTableVersion *string `type:"string"` - - // The number of items in the specified table. DynamoDB updates this value approximately - // every six hours. Recent changes might not be reflected in this value. - ItemCount *int64 `type:"long"` - - // The primary key structure for the table. Each KeySchemaElement consists of: - // - // * AttributeName - The name of the attribute. - // - // * KeyType - The role of the attribute: HASH - partition key RANGE - sort - // key The partition key of an item is also known as its hash attribute. - // The term "hash attribute" derives from DynamoDB's usage of an internal - // hash function to evenly distribute data items across partitions, based - // on their partition key values. The sort key of an item is also known as - // its range attribute. The term "range attribute" derives from the way DynamoDB - // stores items with the same partition key physically close together, in - // sorted order by the sort key value. - // - // For more information about primary keys, see Primary Key (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelPrimaryKey) - // in the Amazon DynamoDB Developer Guide. - KeySchema []*KeySchemaElement `min:"1" type:"list"` - - // The Amazon Resource Name (ARN) that uniquely identifies the latest stream - // for this table. - LatestStreamArn *string `min:"37" type:"string"` - - // A timestamp, in ISO 8601 format, for this stream. - // - // Note that LatestStreamLabel is not a unique identifier for the stream, because - // it is possible that a stream from another table might have the same timestamp. - // However, the combination of the following three elements is guaranteed to - // be unique: - // - // * Amazon Web Services customer ID - // - // * Table name - // - // * StreamLabel - LatestStreamLabel *string `type:"string"` - - // Represents one or more local secondary indexes on the table. Each index is - // scoped to a given partition key value. Tables with one or more local secondary - // indexes are subject to an item collection size limit, where the amount of - // data within a given item collection cannot exceed 10 GB. Each element is - // composed of: - // - // * IndexName - The name of the local secondary index. - // - // * KeySchema - Specifies the complete index key schema. The attribute names - // in the key schema must be between 1 and 255 characters (inclusive). The - // key schema must begin with the same partition key as the table. - // - // * Projection - Specifies attributes that are copied (projected) from the - // table into the index. 
These are in addition to the primary key attributes
-	//    and index key attributes, which are automatically projected. Each attribute
-	//    specification is composed of: ProjectionType - One of the following: KEYS_ONLY
-	//    - Only the index and primary keys are projected into the index. INCLUDE
-	//    - Only the specified table attributes are projected into the index. The
-	//    list of projected attributes is in NonKeyAttributes. ALL - All of the
-	//    table attributes are projected into the index. NonKeyAttributes - A list
-	//    of one or more non-key attribute names that are projected into the secondary
-	//    index. The total count of attributes provided in NonKeyAttributes, summed
-	//    across all of the secondary indexes, must not exceed 100. If you project
-	//    the same attribute into two different indexes, this counts as two distinct
-	//    attributes when determining the total.
-	//
-	//    * IndexSizeBytes - Represents the total size of the index, in bytes. DynamoDB
-	//    updates this value approximately every six hours. Recent changes might
-	//    not be reflected in this value.
-	//
-	//    * ItemCount - Represents the number of items in the index. DynamoDB updates
-	//    this value approximately every six hours. Recent changes might not be
-	//    reflected in this value.
-	//
-	// If the table is in the DELETING state, no information about indexes will
-	// be returned.
-	LocalSecondaryIndexes []*LocalSecondaryIndexDescription `type:"list"`
-
-	// The maximum number of read and write units for the specified on-demand table.
-	// If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits,
-	// or both.
-	OnDemandThroughput *OnDemandThroughput `type:"structure"`
-
-	// The provisioned throughput settings for the table, consisting of read and
-	// write capacity units, along with data about increases and decreases.
-	ProvisionedThroughput *ProvisionedThroughputDescription `type:"structure"`
-
-	// Represents replicas of the table.
-	Replicas []*ReplicaDescription `type:"list"`
-
-	// Contains details for the restore.
-	RestoreSummary *RestoreSummary `type:"structure"`
-
-	// The description of the server-side encryption status on the specified table.
-	SSEDescription *SSEDescription `type:"structure"`
-
-	// The current DynamoDB Streams configuration for the table.
-	StreamSpecification *StreamSpecification `type:"structure"`
-
-	// The Amazon Resource Name (ARN) that uniquely identifies the table.
-	TableArn *string `type:"string"`
-
-	// Contains details of the table class.
-	TableClassSummary *TableClassSummary `type:"structure"`
-
-	// Unique identifier for the table for which the backup was created.
-	TableId *string `type:"string"`
-
-	// The name of the table.
-	TableName *string `min:"3" type:"string"`
-
-	// The total size of the specified table, in bytes. DynamoDB updates this value
-	// approximately every six hours. Recent changes might not be reflected in
-	// this value.
-	TableSizeBytes *int64 `type:"long"`
-
-	// The current state of the table:
-	//
-	//    * CREATING - The table is being created.
-	//
-	//    * UPDATING - The table/index configuration is being updated. The table/index
-	//    remains available for data operations when UPDATING.
-	//
-	//    * DELETING - The table is being deleted.
-	//
-	//    * ACTIVE - The table is ready for use.
-	//
-	//    * INACCESSIBLE_ENCRYPTION_CREDENTIALS - The KMS key used to encrypt the
-	//    table is inaccessible. Table operations may fail due to failure to use
-	//    the KMS key.
DynamoDB will initiate the table archival process when a - // table's KMS key remains inaccessible for more than seven days. - // - // * ARCHIVING - The table is being archived. Operations are not allowed - // until archival is complete. - // - // * ARCHIVED - The table has been archived. See the ArchivalReason for more - // information. - TableStatus *string `type:"string" enum:"TableStatus"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TableDescription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TableDescription) GoString() string { - return s.String() -} - -// SetArchivalSummary sets the ArchivalSummary field's value. -func (s *TableDescription) SetArchivalSummary(v *ArchivalSummary) *TableDescription { - s.ArchivalSummary = v - return s -} - -// SetAttributeDefinitions sets the AttributeDefinitions field's value. -func (s *TableDescription) SetAttributeDefinitions(v []*AttributeDefinition) *TableDescription { - s.AttributeDefinitions = v - return s -} - -// SetBillingModeSummary sets the BillingModeSummary field's value. -func (s *TableDescription) SetBillingModeSummary(v *BillingModeSummary) *TableDescription { - s.BillingModeSummary = v - return s -} - -// SetCreationDateTime sets the CreationDateTime field's value. -func (s *TableDescription) SetCreationDateTime(v time.Time) *TableDescription { - s.CreationDateTime = &v - return s -} - -// SetDeletionProtectionEnabled sets the DeletionProtectionEnabled field's value. -func (s *TableDescription) SetDeletionProtectionEnabled(v bool) *TableDescription { - s.DeletionProtectionEnabled = &v - return s -} - -// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. -func (s *TableDescription) SetGlobalSecondaryIndexes(v []*GlobalSecondaryIndexDescription) *TableDescription { - s.GlobalSecondaryIndexes = v - return s -} - -// SetGlobalTableVersion sets the GlobalTableVersion field's value. -func (s *TableDescription) SetGlobalTableVersion(v string) *TableDescription { - s.GlobalTableVersion = &v - return s -} - -// SetItemCount sets the ItemCount field's value. -func (s *TableDescription) SetItemCount(v int64) *TableDescription { - s.ItemCount = &v - return s -} - -// SetKeySchema sets the KeySchema field's value. -func (s *TableDescription) SetKeySchema(v []*KeySchemaElement) *TableDescription { - s.KeySchema = v - return s -} - -// SetLatestStreamArn sets the LatestStreamArn field's value. -func (s *TableDescription) SetLatestStreamArn(v string) *TableDescription { - s.LatestStreamArn = &v - return s -} - -// SetLatestStreamLabel sets the LatestStreamLabel field's value. -func (s *TableDescription) SetLatestStreamLabel(v string) *TableDescription { - s.LatestStreamLabel = &v - return s -} - -// SetLocalSecondaryIndexes sets the LocalSecondaryIndexes field's value. -func (s *TableDescription) SetLocalSecondaryIndexes(v []*LocalSecondaryIndexDescription) *TableDescription { - s.LocalSecondaryIndexes = v - return s -} - -// SetOnDemandThroughput sets the OnDemandThroughput field's value. 
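Every field on TableDescription is a pointer, so readers of a DescribeTable response typically go through the aws.*Value helpers. A sketch, with the client wiring and table name assumed for illustration:

```go
import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func printTableSummary() error {
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.DescribeTable(&dynamodb.DescribeTableInput{
		TableName: aws.String("ImportedOrders"), // hypothetical
	})
	if err != nil {
		return err
	}
	t := out.Table // *dynamodb.TableDescription
	// aws.StringValue / aws.Int64Value return zero values for nil fields,
	// which matters because ItemCount and TableSizeBytes are refreshed only
	// about every six hours and may be absent.
	fmt.Printf("status=%s items=%d size=%dB\n",
		aws.StringValue(t.TableStatus),
		aws.Int64Value(t.ItemCount),
		aws.Int64Value(t.TableSizeBytes))
	return nil
}
```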
-func (s *TableDescription) SetOnDemandThroughput(v *OnDemandThroughput) *TableDescription { - s.OnDemandThroughput = v - return s -} - -// SetProvisionedThroughput sets the ProvisionedThroughput field's value. -func (s *TableDescription) SetProvisionedThroughput(v *ProvisionedThroughputDescription) *TableDescription { - s.ProvisionedThroughput = v - return s -} - -// SetReplicas sets the Replicas field's value. -func (s *TableDescription) SetReplicas(v []*ReplicaDescription) *TableDescription { - s.Replicas = v - return s -} - -// SetRestoreSummary sets the RestoreSummary field's value. -func (s *TableDescription) SetRestoreSummary(v *RestoreSummary) *TableDescription { - s.RestoreSummary = v - return s -} - -// SetSSEDescription sets the SSEDescription field's value. -func (s *TableDescription) SetSSEDescription(v *SSEDescription) *TableDescription { - s.SSEDescription = v - return s -} - -// SetStreamSpecification sets the StreamSpecification field's value. -func (s *TableDescription) SetStreamSpecification(v *StreamSpecification) *TableDescription { - s.StreamSpecification = v - return s -} - -// SetTableArn sets the TableArn field's value. -func (s *TableDescription) SetTableArn(v string) *TableDescription { - s.TableArn = &v - return s -} - -// SetTableClassSummary sets the TableClassSummary field's value. -func (s *TableDescription) SetTableClassSummary(v *TableClassSummary) *TableDescription { - s.TableClassSummary = v - return s -} - -// SetTableId sets the TableId field's value. -func (s *TableDescription) SetTableId(v string) *TableDescription { - s.TableId = &v - return s -} - -// SetTableName sets the TableName field's value. -func (s *TableDescription) SetTableName(v string) *TableDescription { - s.TableName = &v - return s -} - -// SetTableSizeBytes sets the TableSizeBytes field's value. -func (s *TableDescription) SetTableSizeBytes(v int64) *TableDescription { - s.TableSizeBytes = &v - return s -} - -// SetTableStatus sets the TableStatus field's value. -func (s *TableDescription) SetTableStatus(v string) *TableDescription { - s.TableStatus = &v - return s -} - -// A target table with the specified name is either being created or deleted. -type TableInUseException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TableInUseException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TableInUseException) GoString() string { - return s.String() -} - -func newErrorTableInUseException(v protocol.ResponseMetadata) error { - return &TableInUseException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *TableInUseException) Code() string { - return "TableInUseException" -} - -// Message returns the exception's message. -func (s *TableInUseException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s *TableInUseException) OrigErr() error { - return nil -} - -func (s *TableInUseException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *TableInUseException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *TableInUseException) RequestID() string { - return s.RespMetadata.RequestID -} - -// A source table with the name TableName does not currently exist within the -// subscriber's account or the subscriber is operating in the wrong Amazon Web -// Services Region. -type TableNotFoundException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TableNotFoundException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TableNotFoundException) GoString() string { - return s.String() -} - -func newErrorTableNotFoundException(v protocol.ResponseMetadata) error { - return &TableNotFoundException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *TableNotFoundException) Code() string { - return "TableNotFoundException" -} - -// Message returns the exception's message. -func (s *TableNotFoundException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *TableNotFoundException) OrigErr() error { - return nil -} - -func (s *TableNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *TableNotFoundException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *TableNotFoundException) RequestID() string { - return s.RespMetadata.RequestID -} - -// Describes a tag. A tag is a key-value pair. You can add up to 50 tags to -// a single DynamoDB table. -// -// Amazon Web Services-assigned tag names and values are automatically assigned -// the aws: prefix, which the user cannot assign. Amazon Web Services-assigned -// tag names do not count towards the tag limit of 50. User-assigned tag names -// have the prefix user: in the Cost Allocation Report. You cannot backdate -// the application of a tag. -// -// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) -// in the Amazon DynamoDB Developer Guide. -type Tag struct { - _ struct{} `type:"structure"` - - // The key of the tag. Tag keys are case sensitive. Each DynamoDB table can - // only have up to one tag with the same key. If you try to add an existing - // tag (same key), the existing tag value will be updated to the new value. 
- // - // Key is a required field - Key *string `min:"1" type:"string" required:"true"` - - // The value of the tag. Tag values are case-sensitive and can be null. - // - // Value is a required field - Value *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Tag) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Tag) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Tag) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Tag"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.Value == nil { - invalidParams.Add(request.NewErrParamRequired("Value")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKey sets the Key field's value. -func (s *Tag) SetKey(v string) *Tag { - s.Key = &v - return s -} - -// SetValue sets the Value field's value. -func (s *Tag) SetValue(v string) *Tag { - s.Value = &v - return s -} - -type TagResourceInput struct { - _ struct{} `type:"structure"` - - // Identifies the Amazon DynamoDB resource to which tags should be added. This - // value is an Amazon Resource Name (ARN). - // - // ResourceArn is a required field - ResourceArn *string `min:"1" type:"string" required:"true"` - - // The tags to be assigned to the Amazon DynamoDB resource. - // - // Tags is a required field - Tags []*Tag `type:"list" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TagResourceInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TagResourceInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
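The Tag/TagResourceInput pair above is exercised through TagResource; a short sketch (the ARN, client, and tag values are assumptions for illustration):

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// tagTable attaches one tag to a table ARN; svc is an existing
// *dynamodb.DynamoDB client.
func tagTable(svc *dynamodb.DynamoDB, tableArn string) error {
	_, err := svc.TagResource(&dynamodb.TagResourceInput{
		ResourceArn: aws.String(tableArn),
		Tags: []*dynamodb.Tag{{
			Key:   aws.String("team"), // case sensitive; same key overwrites
			Value: aws.String("data"), // required by the struct tags above
		}},
	})
	return err
}
```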
-func (s *TagResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} - if s.ResourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceArn")) - } - if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) - } - if s.Tags == nil { - invalidParams.Add(request.NewErrParamRequired("Tags")) - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetResourceArn sets the ResourceArn field's value. -func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { - s.ResourceArn = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { - s.Tags = v - return s -} - -type TagResourceOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TagResourceOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TagResourceOutput) GoString() string { - return s.String() -} - -// The description of the Time to Live (TTL) status on the specified table. -type TimeToLiveDescription struct { - _ struct{} `type:"structure"` - - // The name of the TTL attribute for items in the table. - AttributeName *string `min:"1" type:"string"` - - // The TTL status for the table. - TimeToLiveStatus *string `type:"string" enum:"TimeToLiveStatus"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TimeToLiveDescription) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TimeToLiveDescription) GoString() string { - return s.String() -} - -// SetAttributeName sets the AttributeName field's value. -func (s *TimeToLiveDescription) SetAttributeName(v string) *TimeToLiveDescription { - s.AttributeName = &v - return s -} - -// SetTimeToLiveStatus sets the TimeToLiveStatus field's value. -func (s *TimeToLiveDescription) SetTimeToLiveStatus(v string) *TimeToLiveDescription { - s.TimeToLiveStatus = &v - return s -} - -// Represents the settings used to enable or disable Time to Live (TTL) for -// the specified table. -type TimeToLiveSpecification struct { - _ struct{} `type:"structure"` - - // The name of the TTL attribute used to store the expiration time for items - // in the table. 
- // - // AttributeName is a required field - AttributeName *string `min:"1" type:"string" required:"true"` - - // Indicates whether TTL is to be enabled (true) or disabled (false) on the - // table. - // - // Enabled is a required field - Enabled *bool `type:"boolean" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TimeToLiveSpecification) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TimeToLiveSpecification) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *TimeToLiveSpecification) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TimeToLiveSpecification"} - if s.AttributeName == nil { - invalidParams.Add(request.NewErrParamRequired("AttributeName")) - } - if s.AttributeName != nil && len(*s.AttributeName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AttributeName", 1)) - } - if s.Enabled == nil { - invalidParams.Add(request.NewErrParamRequired("Enabled")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAttributeName sets the AttributeName field's value. -func (s *TimeToLiveSpecification) SetAttributeName(v string) *TimeToLiveSpecification { - s.AttributeName = &v - return s -} - -// SetEnabled sets the Enabled field's value. -func (s *TimeToLiveSpecification) SetEnabled(v bool) *TimeToLiveSpecification { - s.Enabled = &v - return s -} - -// Specifies an item to be retrieved as part of the transaction. -type TransactGetItem struct { - _ struct{} `type:"structure"` - - // Contains the primary key that identifies the item to get, together with the - // name of the table that contains the item, and optionally the specific attributes - // of the item to retrieve. - // - // Get is a required field - Get *Get `type:"structure" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TransactGetItem) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TransactGetItem) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *TransactGetItem) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TransactGetItem"} - if s.Get == nil { - invalidParams.Add(request.NewErrParamRequired("Get")) - } - if s.Get != nil { - if err := s.Get.Validate(); err != nil { - invalidParams.AddNested("Get", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGet sets the Get field's value. 
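TimeToLiveSpecification is submitted via UpdateTimeToLive; a minimal sketch assuming a numeric epoch-seconds attribute (table and attribute names invented):

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// enableTTL turns on TTL keyed off a hypothetical "expiresAt" attribute
// holding an expiry time in epoch seconds.
func enableTTL(svc *dynamodb.DynamoDB) error {
	_, err := svc.UpdateTimeToLive(&dynamodb.UpdateTimeToLiveInput{
		TableName: aws.String("ImportedOrders"), // hypothetical
		TimeToLiveSpecification: &dynamodb.TimeToLiveSpecification{
			AttributeName: aws.String("expiresAt"),
			Enabled:       aws.Bool(true),
		},
	})
	return err
}
```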
-func (s *TransactGetItem) SetGet(v *Get) *TransactGetItem { - s.Get = v - return s -} - -type TransactGetItemsInput struct { - _ struct{} `type:"structure"` - - // A value of TOTAL causes consumed capacity information to be returned, and - // a value of NONE prevents that information from being returned. No other value - // is valid. - ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` - - // An ordered array of up to 100 TransactGetItem objects, each of which contains - // a Get structure. - // - // TransactItems is a required field - TransactItems []*TransactGetItem `min:"1" type:"list" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TransactGetItemsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TransactGetItemsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *TransactGetItemsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TransactGetItemsInput"} - if s.TransactItems == nil { - invalidParams.Add(request.NewErrParamRequired("TransactItems")) - } - if s.TransactItems != nil && len(s.TransactItems) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TransactItems", 1)) - } - if s.TransactItems != nil { - for i, v := range s.TransactItems { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TransactItems", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. -func (s *TransactGetItemsInput) SetReturnConsumedCapacity(v string) *TransactGetItemsInput { - s.ReturnConsumedCapacity = &v - return s -} - -// SetTransactItems sets the TransactItems field's value. -func (s *TransactGetItemsInput) SetTransactItems(v []*TransactGetItem) *TransactGetItemsInput { - s.TransactItems = v - return s -} - -type TransactGetItemsOutput struct { - _ struct{} `type:"structure"` - - // If the ReturnConsumedCapacity value was TOTAL, this is an array of ConsumedCapacity - // objects, one for each table addressed by TransactGetItem objects in the TransactItems - // parameter. These ConsumedCapacity objects report the read-capacity units - // consumed by the TransactGetItems call in that table. - ConsumedCapacity []*ConsumedCapacity `type:"list"` - - // An ordered array of up to 100 ItemResponse objects, each of which corresponds - // to the TransactGetItem object in the same position in the TransactItems array. - // Each ItemResponse object contains a Map of the name-value pairs that are - // the projected attributes of the requested item. - // - // If a requested item could not be retrieved, the corresponding ItemResponse - // object is Null, or if the requested item has no projected attributes, the - // corresponding ItemResponse object is an empty Map. - Responses []*ItemResponse `min:"1" type:"list"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TransactGetItemsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TransactGetItemsOutput) GoString() string { - return s.String() -} - -// SetConsumedCapacity sets the ConsumedCapacity field's value. -func (s *TransactGetItemsOutput) SetConsumedCapacity(v []*ConsumedCapacity) *TransactGetItemsOutput { - s.ConsumedCapacity = v - return s -} - -// SetResponses sets the Responses field's value. -func (s *TransactGetItemsOutput) SetResponses(v []*ItemResponse) *TransactGetItemsOutput { - s.Responses = v - return s -} - -// A list of requests that can perform update, put, delete, or check operations -// on multiple items in one or more tables atomically. -type TransactWriteItem struct { - _ struct{} `type:"structure"` - - // A request to perform a check item operation. - ConditionCheck *ConditionCheck `type:"structure"` - - // A request to perform a DeleteItem operation. - Delete *Delete `type:"structure"` - - // A request to perform a PutItem operation. - Put *Put `type:"structure"` - - // A request to perform an UpdateItem operation. - Update *Update `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TransactWriteItem) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TransactWriteItem) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *TransactWriteItem) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TransactWriteItem"} - if s.ConditionCheck != nil { - if err := s.ConditionCheck.Validate(); err != nil { - invalidParams.AddNested("ConditionCheck", err.(request.ErrInvalidParams)) - } - } - if s.Delete != nil { - if err := s.Delete.Validate(); err != nil { - invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) - } - } - if s.Put != nil { - if err := s.Put.Validate(); err != nil { - invalidParams.AddNested("Put", err.(request.ErrInvalidParams)) - } - } - if s.Update != nil { - if err := s.Update.Validate(); err != nil { - invalidParams.AddNested("Update", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetConditionCheck sets the ConditionCheck field's value. -func (s *TransactWriteItem) SetConditionCheck(v *ConditionCheck) *TransactWriteItem { - s.ConditionCheck = v - return s -} - -// SetDelete sets the Delete field's value. -func (s *TransactWriteItem) SetDelete(v *Delete) *TransactWriteItem { - s.Delete = v - return s -} - -// SetPut sets the Put field's value. 
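TransactGetItemsInput wraps up to 100 Get structures, and the Responses slice comes back in the same order as TransactItems. A sketch with invented table and key names:

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// getTwoItems reads two items in one transaction; out.Responses[i]
// corresponds to TransactItems[i].
func getTwoItems(svc *dynamodb.DynamoDB) ([]*dynamodb.ItemResponse, error) {
	key := func(id string) map[string]*dynamodb.AttributeValue {
		return map[string]*dynamodb.AttributeValue{"OrderId": {S: aws.String(id)}}
	}
	out, err := svc.TransactGetItems(&dynamodb.TransactGetItemsInput{
		TransactItems: []*dynamodb.TransactGetItem{
			{Get: &dynamodb.Get{TableName: aws.String("ImportedOrders"), Key: key("o-1")}},
			{Get: &dynamodb.Get{TableName: aws.String("ImportedOrders"), Key: key("o-2")}},
		},
	})
	if err != nil {
		return nil, err
	}
	return out.Responses, nil
}
```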
-func (s *TransactWriteItem) SetPut(v *Put) *TransactWriteItem { - s.Put = v - return s -} - -// SetUpdate sets the Update field's value. -func (s *TransactWriteItem) SetUpdate(v *Update) *TransactWriteItem { - s.Update = v - return s -} - -type TransactWriteItemsInput struct { - _ struct{} `type:"structure"` - - // Providing a ClientRequestToken makes the call to TransactWriteItems idempotent, - // meaning that multiple identical calls have the same effect as one single - // call. - // - // Although multiple identical calls using the same client request token produce - // the same result on the server (no side effects), the responses to the calls - // might not be the same. If the ReturnConsumedCapacity parameter is set, then - // the initial TransactWriteItems call returns the amount of write capacity - // units consumed in making the changes. Subsequent TransactWriteItems calls - // with the same client token return the number of read capacity units consumed - // in reading the item. - // - // A client request token is valid for 10 minutes after the first request that - // uses it is completed. After 10 minutes, any request with the same client - // token is treated as a new request. Do not resubmit the same request with - // the same client token for more than 10 minutes, or the result might not be - // idempotent. - // - // If you submit a request with the same client token but a change in other - // parameters within the 10-minute idempotency window, DynamoDB returns an IdempotentParameterMismatch - // exception. - ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` - - // Determines the level of detail about either provisioned or on-demand throughput - // consumption that is returned in the response: - // - // * INDEXES - The response includes the aggregate ConsumedCapacity for the - // operation, together with ConsumedCapacity for each table and secondary - // index that was accessed. Note that some operations, such as GetItem and - // BatchGetItem, do not access any indexes at all. In these cases, specifying - // INDEXES will only return ConsumedCapacity information for table(s). - // - // * TOTAL - The response includes only the aggregate ConsumedCapacity for - // the operation. - // - // * NONE - No ConsumedCapacity details are included in the response. - ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` - - // Determines whether item collection metrics are returned. If set to SIZE, - // the response includes statistics about item collections (if any), that were - // modified during the operation and are returned in the response. If set to - // NONE (the default), no statistics are returned. - ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"` - - // An ordered array of up to 100 TransactWriteItem objects, each of which contains - // a ConditionCheck, Put, Update, or Delete object. These can operate on items - // in different tables, but the tables must reside in the same Amazon Web Services - // account and Region, and no two of them can operate on the same item. - // - // TransactItems is a required field - TransactItems []*TransactWriteItem `min:"1" type:"list" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s TransactWriteItemsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TransactWriteItemsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *TransactWriteItemsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TransactWriteItemsInput"} - if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) - } - if s.TransactItems == nil { - invalidParams.Add(request.NewErrParamRequired("TransactItems")) - } - if s.TransactItems != nil && len(s.TransactItems) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TransactItems", 1)) - } - if s.TransactItems != nil { - for i, v := range s.TransactItems { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TransactItems", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetClientRequestToken sets the ClientRequestToken field's value. -func (s *TransactWriteItemsInput) SetClientRequestToken(v string) *TransactWriteItemsInput { - s.ClientRequestToken = &v - return s -} - -// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. -func (s *TransactWriteItemsInput) SetReturnConsumedCapacity(v string) *TransactWriteItemsInput { - s.ReturnConsumedCapacity = &v - return s -} - -// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value. -func (s *TransactWriteItemsInput) SetReturnItemCollectionMetrics(v string) *TransactWriteItemsInput { - s.ReturnItemCollectionMetrics = &v - return s -} - -// SetTransactItems sets the TransactItems field's value. -func (s *TransactWriteItemsInput) SetTransactItems(v []*TransactWriteItem) *TransactWriteItemsInput { - s.TransactItems = v - return s -} - -type TransactWriteItemsOutput struct { - _ struct{} `type:"structure"` - - // The capacity units consumed by the entire TransactWriteItems operation. The - // values of the list are ordered according to the ordering of the TransactItems - // request parameter. - ConsumedCapacity []*ConsumedCapacity `type:"list"` - - // A list of tables that were processed by TransactWriteItems and, for each - // table, information about any item collections that were affected by individual - // UpdateItem, PutItem, or DeleteItem operations. - ItemCollectionMetrics map[string][]*ItemCollectionMetrics `type:"map"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TransactWriteItemsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
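The ClientRequestToken behavior documented above is what makes retries safe: leaving the field nil lets the SDK generate a token (the field is tagged idempotencyToken:"true"), while pinning one dedupes replays inside the 10-minute window. A sketch with invented names:

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// putWithToken performs a one-item transactional write that can be
// retried with the same token without applying twice.
func putWithToken(svc *dynamodb.DynamoDB, token string) error {
	_, err := svc.TransactWriteItems(&dynamodb.TransactWriteItemsInput{
		ClientRequestToken: aws.String(token),
		TransactItems: []*dynamodb.TransactWriteItem{{
			Put: &dynamodb.Put{
				TableName: aws.String("ImportedOrders"), // hypothetical
				Item: map[string]*dynamodb.AttributeValue{
					"OrderId": {S: aws.String("o-3")},
				},
			},
		}},
	})
	return err
}
```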
-func (s TransactWriteItemsOutput) GoString() string {
-	return s.String()
-}
-
-// SetConsumedCapacity sets the ConsumedCapacity field's value.
-func (s *TransactWriteItemsOutput) SetConsumedCapacity(v []*ConsumedCapacity) *TransactWriteItemsOutput {
-	s.ConsumedCapacity = v
-	return s
-}
-
-// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value.
-func (s *TransactWriteItemsOutput) SetItemCollectionMetrics(v map[string][]*ItemCollectionMetrics) *TransactWriteItemsOutput {
-	s.ItemCollectionMetrics = v
-	return s
-}
-
-// The entire transaction request was canceled.
-//
-// DynamoDB cancels a TransactWriteItems request under the following circumstances:
-//
-// - A condition in one of the condition expressions is not met.
-//
-// - A table in the TransactWriteItems request is in a different account
-// or region.
-//
-// - More than one action in the TransactWriteItems operation targets the
-// same item.
-//
-// - There is insufficient provisioned capacity for the transaction to be
-// completed.
-//
-// - An item size becomes too large (larger than 400 KB), or a local secondary
-// index (LSI) becomes too large, or a similar validation error occurs because
-// of changes made by the transaction.
-//
-// - There is a user error, such as an invalid data format.
-//
-// - There is an ongoing TransactWriteItems operation that conflicts with
-// a concurrent TransactWriteItems request. In this case the TransactWriteItems
-// operation fails with a TransactionCanceledException.
-//
-// DynamoDB cancels a TransactGetItems request under the following circumstances:
-//
-// - There is an ongoing TransactGetItems operation that conflicts with a
-// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request.
-// In this case the TransactGetItems operation fails with a TransactionCanceledException.
-//
-// - A table in the TransactGetItems request is in a different account or
-// region.
-//
-// - There is insufficient provisioned capacity for the transaction to be
-// completed.
-//
-// - There is a user error, such as an invalid data format.
-//
-// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons
-// property. This property is not set for other languages. Transaction cancellation
-// reasons are ordered in the order of the requested items; if an item has no
-// error, it will have a None code and a Null message.
-//
-// Cancellation reason codes and possible error messages:
-//
-// - No Errors: Code: None Message: null
-//
-// - Conditional Check Failed: Code: ConditionalCheckFailed Message: The
-// conditional request failed.
-//
-// - Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded
-// Message: Collection size exceeded.
-//
-// - Transaction Conflict: Code: TransactionConflict Message: Transaction
-// is ongoing for the item.
-//
-// - Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded
-// Messages: The level of configured provisioned throughput for the table
-// was exceeded. Consider increasing your provisioning level with the UpdateTable
-// API. This message is returned when provisioned throughput is exceeded
-// on a provisioned DynamoDB table. The level of configured provisioned
-// throughput for one or more global secondary indexes of the table was exceeded.
-// Consider increasing your provisioning level for the under-provisioned
-// global secondary indexes with the UpdateTable API. This message is returned
-// when provisioned throughput is exceeded on a provisioned GSI.
-//
-// - Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
-// the current capacity of your table or index. DynamoDB is automatically
-// scaling your table or index so please try again shortly. If exceptions
-// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
-// This message is returned when writes get throttled on an On-Demand table
-// as DynamoDB is automatically scaling the table. Throughput exceeds the
-// current capacity for one or more global secondary indexes. DynamoDB is
-// automatically scaling your index so please try again shortly. This message
-// is returned when writes get throttled on an On-Demand GSI as DynamoDB
-// is automatically scaling the GSI.
-//
-// - Validation Error: Code: ValidationError Messages: One or more parameter
-// values were invalid. The update expression attempted to update the secondary
-// index key beyond allowed size limits. The update expression attempted
-// to update the secondary index key to unsupported type. An operand in the
-// update expression has an incorrect data type. Item size to update has
-// exceeded the maximum allowed size. Number overflow. Attempting to store
-// a number with magnitude larger than supported range. Type mismatch for
-// attribute to update. Nesting Levels have exceeded supported limits. The
-// document path provided in the update expression is invalid for update.
-// The provided expression refers to an attribute that does not exist in
-// the item.
-type TransactionCanceledException struct {
-	_            struct{}                  `type:"structure"`
-	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
-	// A list of cancellation reasons.
-	CancellationReasons []*CancellationReason `min:"1" type:"list"`
-
-	Message_ *string `locationName:"Message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TransactionCanceledException) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TransactionCanceledException) GoString() string {
-	return s.String()
-}
-
-func newErrorTransactionCanceledException(v protocol.ResponseMetadata) error {
-	return &TransactionCanceledException{
-		RespMetadata: v,
-	}
-}
-
-// Code returns the exception type name.
-func (s *TransactionCanceledException) Code() string {
-	return "TransactionCanceledException"
-}
-
-// Message returns the exception's message.
-func (s *TransactionCanceledException) Message() string {
-	if s.Message_ != nil {
-		return *s.Message_
-	}
-	return ""
-}
-
-// OrigErr always returns nil, satisfying the awserr.Error interface.
-func (s *TransactionCanceledException) OrigErr() error {
-	return nil
-}
-
-func (s *TransactionCanceledException) Error() string {
-	return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
-}
-
-// StatusCode returns the HTTP status code for the request's response error.
-func (s *TransactionCanceledException) StatusCode() int {
-	return s.RespMetadata.StatusCode
-}
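A sketch of how a Go caller might surface these cancellation reasons from the removed v1 exception type; note that, the Java remark above notwithstanding, the Go type does expose CancellationReasons, as the struct definition shows. The helper name is illustrative.

```go
package transactions

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// reportCancellation prints one line per transact item. Reasons are ordered
// like the TransactItems in the request; items without an error carry the
// None code and a null message, per the documentation above.
func reportCancellation(err error) {
	tce, ok := err.(*dynamodb.TransactionCanceledException)
	if !ok {
		return
	}
	for i, reason := range tce.CancellationReasons {
		fmt.Printf("item %d: code=%s message=%s\n",
			i, aws.StringValue(reason.Code), aws.StringValue(reason.Message))
	}
}
```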
-
-// RequestID returns the service's response RequestID for the request.
-func (s *TransactionCanceledException) RequestID() string {
-	return s.RespMetadata.RequestID
-}
-
-// Operation was rejected because there is an ongoing transaction for the item.
-type TransactionConflictException struct {
-	_            struct{}                  `type:"structure"`
-	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
-	Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TransactionConflictException) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TransactionConflictException) GoString() string {
-	return s.String()
-}
-
-func newErrorTransactionConflictException(v protocol.ResponseMetadata) error {
-	return &TransactionConflictException{
-		RespMetadata: v,
-	}
-}
-
-// Code returns the exception type name.
-func (s *TransactionConflictException) Code() string {
-	return "TransactionConflictException"
-}
-
-// Message returns the exception's message.
-func (s *TransactionConflictException) Message() string {
-	if s.Message_ != nil {
-		return *s.Message_
-	}
-	return ""
-}
-
-// OrigErr always returns nil, satisfying the awserr.Error interface.
-func (s *TransactionConflictException) OrigErr() error {
-	return nil
-}
-
-func (s *TransactionConflictException) Error() string {
-	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// StatusCode returns the HTTP status code for the request's response error.
-func (s *TransactionConflictException) StatusCode() int {
-	return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for the request.
-func (s *TransactionConflictException) RequestID() string {
-	return s.RespMetadata.RequestID
-}
-
-// The transaction with the given request token is already in progress.
-//
-// # Recommended Settings
-//
-// This is a general recommendation for handling the TransactionInProgressException.
-// These settings help ensure that the client retries will trigger completion
-// of the ongoing TransactWriteItems request.
-//
-// - Set clientExecutionTimeout to a value that allows at least one retry
-// to be processed after 5 seconds have elapsed since the first attempt for
-// the TransactWriteItems operation.
-//
-// - Set socketTimeout to a value a little lower than the requestTimeout
-// setting.
-//
-// - requestTimeout should be set based on the time taken for the individual
-// retries of a single HTTP request for your use case, but setting it to
-// 1 second or higher should work well to reduce chances of retries and TransactionInProgressException
-// errors.
-//
-// - Use exponential backoff when retrying and tune backoff if needed.
-//
-// Assuming default retry policy (https://github.com/aws/aws-sdk-java/blob/fd409dee8ae23fb8953e0bb4dbde65536a7e0514/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L97),
-// example timeout settings based on the guidelines above are as follows:
-//
-// Example timeline:
-//
-// - 0-1000 first attempt
-//
-// - 1000-1500 first sleep/delay (default retry policy uses 500 ms as base
-// delay for 4xx errors)
-//
-// - 1500-2500 second attempt
-//
-// - 2500-3500 second sleep/delay (500 * 2, exponential backoff)
-//
-// - 3500-4500 third attempt
-//
-// - 4500-6500 third sleep/delay (500 * 2^2)
-//
-// - 6500-7500 fourth attempt (this can trigger inline recovery since 5 seconds
-// have elapsed since the first attempt reached TC)
-type TransactionInProgressException struct {
-	_            struct{}                  `type:"structure"`
-	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
-	Message_ *string `locationName:"Message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TransactionInProgressException) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TransactionInProgressException) GoString() string {
-	return s.String()
-}
-
-func newErrorTransactionInProgressException(v protocol.ResponseMetadata) error {
-	return &TransactionInProgressException{
-		RespMetadata: v,
-	}
-}
-
-// Code returns the exception type name.
-func (s *TransactionInProgressException) Code() string {
-	return "TransactionInProgressException"
-}
-
-// Message returns the exception's message.
-func (s *TransactionInProgressException) Message() string {
-	if s.Message_ != nil {
-		return *s.Message_
-	}
-	return ""
-}
-
-// OrigErr always returns nil, satisfying the awserr.Error interface.
-func (s *TransactionInProgressException) OrigErr() error {
-	return nil
-}
-
-func (s *TransactionInProgressException) Error() string {
-	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// StatusCode returns the HTTP status code for the request's response error.
-func (s *TransactionInProgressException) StatusCode() int {
-	return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for the request.
-func (s *TransactionInProgressException) RequestID() string {
-	return s.RespMetadata.RequestID
-}
-
-type UntagResourceInput struct {
-	_ struct{} `type:"structure"`
-
-	// The DynamoDB resource that the tags will be removed from. This value is an
-	// Amazon Resource Name (ARN).
-	//
-	// ResourceArn is a required field
-	ResourceArn *string `min:"1" type:"string" required:"true"`
-
-	// A list of tag keys. Existing tags of the resource whose keys are members
-	// of this list will be removed from the DynamoDB resource.
-	//
-	// TagKeys is a required field
-	TagKeys []*string `type:"list" required:"true"`
-}
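To make the TransactionInProgressException backoff guidance above concrete in Go terms, here is a sketch of a retry wrapper that mirrors the 500 ms base delay and doubling from the example timeline; the helper name and attempt budget are illustrative, not part of the SDK, and in practice the SDK's built-in retryer may already cover this.

```go
package transactions

import (
	"time"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/dynamodb"
	"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
)

// transactWithBackoff retries with a 500 ms base delay doubled per attempt,
// so by the fourth attempt more than five seconds have elapsed since the
// first one, which is what allows inline recovery per the timeline above.
func transactWithBackoff(svc dynamodbiface.DynamoDBAPI, in *dynamodb.TransactWriteItemsInput) error {
	delay := 500 * time.Millisecond
	var err error
	for attempt := 0; attempt < 4; attempt++ {
		if attempt > 0 {
			time.Sleep(delay)
			delay *= 2
		}
		if _, err = svc.TransactWriteItems(in); err == nil {
			return nil
		}
		// Only keep retrying while the transaction is still in progress.
		aerr, ok := err.(awserr.Error)
		if !ok || aerr.Code() != dynamodb.ErrCodeTransactionInProgressException {
			return err
		}
	}
	return err
}
```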
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UntagResourceInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UntagResourceInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *UntagResourceInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"}
-	if s.ResourceArn == nil {
-		invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
-	}
-	if s.ResourceArn != nil && len(*s.ResourceArn) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1))
-	}
-	if s.TagKeys == nil {
-		invalidParams.Add(request.NewErrParamRequired("TagKeys"))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetResourceArn sets the ResourceArn field's value.
-func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput {
-	s.ResourceArn = &v
-	return s
-}
-
-// SetTagKeys sets the TagKeys field's value.
-func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput {
-	s.TagKeys = v
-	return s
-}
-
-type UntagResourceOutput struct {
-	_ struct{} `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UntagResourceOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UntagResourceOutput) GoString() string {
-	return s.String()
-}
-
-// Represents a request to perform an UpdateItem operation.
-type Update struct {
-	_ struct{} `type:"structure"`
-
-	// A condition that must be satisfied in order for a conditional update to succeed.
-	ConditionExpression *string `type:"string"`
-
-	// One or more substitution tokens for attribute names in an expression.
-	ExpressionAttributeNames map[string]*string `type:"map"`
-
-	// One or more values that can be substituted in an expression.
-	ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
-
-	// The primary key of the item to be updated. Each element consists of an attribute
-	// name and a value for that attribute.
-	//
-	// Key is a required field
-	Key map[string]*AttributeValue `type:"map" required:"true"`
-
-	// Use ReturnValuesOnConditionCheckFailure to get the item attributes if the
-	// Update condition fails. For ReturnValuesOnConditionCheckFailure, the valid
-	// values are: NONE and ALL_OLD.
-	ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"`
-
-	// The name of the table for the UpdateItem request. You can also provide the
-	// Amazon Resource Name (ARN) of the table in this parameter.
-	//
-	// TableName is a required field
-	TableName *string `min:"1" type:"string" required:"true"`
-
-	// An expression that defines one or more attributes to be updated, the action
-	// to be performed on them, and new value(s) for them.
- // - // UpdateExpression is a required field - UpdateExpression *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Update) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Update) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Update) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Update"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - if s.UpdateExpression == nil { - invalidParams.Add(request.NewErrParamRequired("UpdateExpression")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetConditionExpression sets the ConditionExpression field's value. -func (s *Update) SetConditionExpression(v string) *Update { - s.ConditionExpression = &v - return s -} - -// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. -func (s *Update) SetExpressionAttributeNames(v map[string]*string) *Update { - s.ExpressionAttributeNames = v - return s -} - -// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. -func (s *Update) SetExpressionAttributeValues(v map[string]*AttributeValue) *Update { - s.ExpressionAttributeValues = v - return s -} - -// SetKey sets the Key field's value. -func (s *Update) SetKey(v map[string]*AttributeValue) *Update { - s.Key = v - return s -} - -// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. -func (s *Update) SetReturnValuesOnConditionCheckFailure(v string) *Update { - s.ReturnValuesOnConditionCheckFailure = &v - return s -} - -// SetTableName sets the TableName field's value. -func (s *Update) SetTableName(v string) *Update { - s.TableName = &v - return s -} - -// SetUpdateExpression sets the UpdateExpression field's value. -func (s *Update) SetUpdateExpression(v string) *Update { - s.UpdateExpression = &v - return s -} - -type UpdateContinuousBackupsInput struct { - _ struct{} `type:"structure"` - - // Represents the settings used to enable point in time recovery. - // - // PointInTimeRecoverySpecification is a required field - PointInTimeRecoverySpecification *PointInTimeRecoverySpecification `type:"structure" required:"true"` - - // The name of the table. You can also provide the Amazon Resource Name (ARN) - // of the table in this parameter. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
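A minimal sketch of enabling point-in-time recovery with the UpdateContinuousBackups types documented just above; the table name is hypothetical.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// Turn on PITR; the response echoes the resulting backup settings.
	out, err := svc.UpdateContinuousBackups(&dynamodb.UpdateContinuousBackupsInput{
		TableName: aws.String("Orders"),
		PointInTimeRecoverySpecification: &dynamodb.PointInTimeRecoverySpecification{
			PointInTimeRecoveryEnabled: aws.Bool(true),
		},
	})
	if err != nil {
		fmt.Println("update failed:", err)
		return
	}
	fmt.Println(out.ContinuousBackupsDescription)
}
```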
-func (s UpdateContinuousBackupsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateContinuousBackupsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateContinuousBackupsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateContinuousBackupsInput"} - if s.PointInTimeRecoverySpecification == nil { - invalidParams.Add(request.NewErrParamRequired("PointInTimeRecoverySpecification")) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - if s.PointInTimeRecoverySpecification != nil { - if err := s.PointInTimeRecoverySpecification.Validate(); err != nil { - invalidParams.AddNested("PointInTimeRecoverySpecification", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPointInTimeRecoverySpecification sets the PointInTimeRecoverySpecification field's value. -func (s *UpdateContinuousBackupsInput) SetPointInTimeRecoverySpecification(v *PointInTimeRecoverySpecification) *UpdateContinuousBackupsInput { - s.PointInTimeRecoverySpecification = v - return s -} - -// SetTableName sets the TableName field's value. -func (s *UpdateContinuousBackupsInput) SetTableName(v string) *UpdateContinuousBackupsInput { - s.TableName = &v - return s -} - -type UpdateContinuousBackupsOutput struct { - _ struct{} `type:"structure"` - - // Represents the continuous backups and point in time recovery settings on - // the table. - ContinuousBackupsDescription *ContinuousBackupsDescription `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateContinuousBackupsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateContinuousBackupsOutput) GoString() string { - return s.String() -} - -// SetContinuousBackupsDescription sets the ContinuousBackupsDescription field's value. -func (s *UpdateContinuousBackupsOutput) SetContinuousBackupsDescription(v *ContinuousBackupsDescription) *UpdateContinuousBackupsOutput { - s.ContinuousBackupsDescription = v - return s -} - -type UpdateContributorInsightsInput struct { - _ struct{} `type:"structure"` - - // Represents the contributor insights action. - // - // ContributorInsightsAction is a required field - ContributorInsightsAction *string `type:"string" required:"true" enum:"ContributorInsightsAction"` - - // The global secondary index name, if applicable. - IndexName *string `min:"3" type:"string"` - - // The name of the table. You can also provide the Amazon Resource Name (ARN) - // of the table in this parameter. 
- // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateContributorInsightsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateContributorInsightsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateContributorInsightsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateContributorInsightsInput"} - if s.ContributorInsightsAction == nil { - invalidParams.Add(request.NewErrParamRequired("ContributorInsightsAction")) - } - if s.IndexName != nil && len(*s.IndexName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetContributorInsightsAction sets the ContributorInsightsAction field's value. -func (s *UpdateContributorInsightsInput) SetContributorInsightsAction(v string) *UpdateContributorInsightsInput { - s.ContributorInsightsAction = &v - return s -} - -// SetIndexName sets the IndexName field's value. -func (s *UpdateContributorInsightsInput) SetIndexName(v string) *UpdateContributorInsightsInput { - s.IndexName = &v - return s -} - -// SetTableName sets the TableName field's value. -func (s *UpdateContributorInsightsInput) SetTableName(v string) *UpdateContributorInsightsInput { - s.TableName = &v - return s -} - -type UpdateContributorInsightsOutput struct { - _ struct{} `type:"structure"` - - // The status of contributor insights - ContributorInsightsStatus *string `type:"string" enum:"ContributorInsightsStatus"` - - // The name of the global secondary index, if applicable. - IndexName *string `min:"3" type:"string"` - - // The name of the table. - TableName *string `min:"3" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateContributorInsightsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateContributorInsightsOutput) GoString() string { - return s.String() -} - -// SetContributorInsightsStatus sets the ContributorInsightsStatus field's value. -func (s *UpdateContributorInsightsOutput) SetContributorInsightsStatus(v string) *UpdateContributorInsightsOutput { - s.ContributorInsightsStatus = &v - return s -} - -// SetIndexName sets the IndexName field's value. 
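A sketch of the UpdateContributorInsights call described above, enabling insights on a hypothetical table and global secondary index.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.UpdateContributorInsights(&dynamodb.UpdateContributorInsightsInput{
		TableName:                 aws.String("Orders"),
		IndexName:                 aws.String("OrdersByCustomer"),
		ContributorInsightsAction: aws.String(dynamodb.ContributorInsightsActionEnable),
	})
	if err != nil {
		fmt.Println("update failed:", err)
		return
	}
	fmt.Println(aws.StringValue(out.ContributorInsightsStatus))
}
```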
-func (s *UpdateContributorInsightsOutput) SetIndexName(v string) *UpdateContributorInsightsOutput { - s.IndexName = &v - return s -} - -// SetTableName sets the TableName field's value. -func (s *UpdateContributorInsightsOutput) SetTableName(v string) *UpdateContributorInsightsOutput { - s.TableName = &v - return s -} - -// Represents the new provisioned throughput settings to be applied to a global -// secondary index. -type UpdateGlobalSecondaryIndexAction struct { - _ struct{} `type:"structure"` - - // The name of the global secondary index to be updated. - // - // IndexName is a required field - IndexName *string `min:"3" type:"string" required:"true"` - - // Updates the maximum number of read and write units for the specified global - // secondary index. If you use this parameter, you must specify MaxReadRequestUnits, - // MaxWriteRequestUnits, or both. - OnDemandThroughput *OnDemandThroughput `type:"structure"` - - // Represents the provisioned throughput settings for the specified global secondary - // index. - // - // For current minimum and maximum provisioned throughput values, see Service, - // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) - // in the Amazon DynamoDB Developer Guide. - ProvisionedThroughput *ProvisionedThroughput `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateGlobalSecondaryIndexAction) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateGlobalSecondaryIndexAction) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateGlobalSecondaryIndexAction) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateGlobalSecondaryIndexAction"} - if s.IndexName == nil { - invalidParams.Add(request.NewErrParamRequired("IndexName")) - } - if s.IndexName != nil && len(*s.IndexName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) - } - if s.ProvisionedThroughput != nil { - if err := s.ProvisionedThroughput.Validate(); err != nil { - invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetIndexName sets the IndexName field's value. -func (s *UpdateGlobalSecondaryIndexAction) SetIndexName(v string) *UpdateGlobalSecondaryIndexAction { - s.IndexName = &v - return s -} - -// SetOnDemandThroughput sets the OnDemandThroughput field's value. -func (s *UpdateGlobalSecondaryIndexAction) SetOnDemandThroughput(v *OnDemandThroughput) *UpdateGlobalSecondaryIndexAction { - s.OnDemandThroughput = v - return s -} - -// SetProvisionedThroughput sets the ProvisionedThroughput field's value. -func (s *UpdateGlobalSecondaryIndexAction) SetProvisionedThroughput(v *ProvisionedThroughput) *UpdateGlobalSecondaryIndexAction { - s.ProvisionedThroughput = v - return s -} - -type UpdateGlobalTableInput struct { - _ struct{} `type:"structure"` - - // The global table name. 
- // - // GlobalTableName is a required field - GlobalTableName *string `min:"3" type:"string" required:"true"` - - // A list of Regions that should be added or removed from the global table. - // - // ReplicaUpdates is a required field - ReplicaUpdates []*ReplicaUpdate `type:"list" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateGlobalTableInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateGlobalTableInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateGlobalTableInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateGlobalTableInput"} - if s.GlobalTableName == nil { - invalidParams.Add(request.NewErrParamRequired("GlobalTableName")) - } - if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3)) - } - if s.ReplicaUpdates == nil { - invalidParams.Add(request.NewErrParamRequired("ReplicaUpdates")) - } - if s.ReplicaUpdates != nil { - for i, v := range s.ReplicaUpdates { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaUpdates", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGlobalTableName sets the GlobalTableName field's value. -func (s *UpdateGlobalTableInput) SetGlobalTableName(v string) *UpdateGlobalTableInput { - s.GlobalTableName = &v - return s -} - -// SetReplicaUpdates sets the ReplicaUpdates field's value. -func (s *UpdateGlobalTableInput) SetReplicaUpdates(v []*ReplicaUpdate) *UpdateGlobalTableInput { - s.ReplicaUpdates = v - return s -} - -type UpdateGlobalTableOutput struct { - _ struct{} `type:"structure"` - - // Contains the details of the global table. - GlobalTableDescription *GlobalTableDescription `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateGlobalTableOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateGlobalTableOutput) GoString() string { - return s.String() -} - -// SetGlobalTableDescription sets the GlobalTableDescription field's value. -func (s *UpdateGlobalTableOutput) SetGlobalTableDescription(v *GlobalTableDescription) *UpdateGlobalTableOutput { - s.GlobalTableDescription = v - return s -} - -type UpdateGlobalTableSettingsInput struct { - _ struct{} `type:"structure"` - - // The billing mode of the global table. 
If GlobalTableBillingMode is not specified, - // the global table defaults to PROVISIONED capacity billing mode. - // - // * PROVISIONED - We recommend using PROVISIONED for predictable workloads. - // PROVISIONED sets the billing mode to Provisioned capacity mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html). - // - // * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable - // workloads. PAY_PER_REQUEST sets the billing mode to On-demand capacity - // mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/on-demand-capacity-mode.html). - GlobalTableBillingMode *string `type:"string" enum:"BillingMode"` - - // Represents the settings of a global secondary index for a global table that - // will be modified. - GlobalTableGlobalSecondaryIndexSettingsUpdate []*GlobalTableGlobalSecondaryIndexSettingsUpdate `min:"1" type:"list"` - - // The name of the global table - // - // GlobalTableName is a required field - GlobalTableName *string `min:"3" type:"string" required:"true"` - - // Auto scaling settings for managing provisioned write capacity for the global - // table. - GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"` - - // The maximum number of writes consumed per second before DynamoDB returns - // a ThrottlingException. - GlobalTableProvisionedWriteCapacityUnits *int64 `min:"1" type:"long"` - - // Represents the settings for a global table in a Region that will be modified. - ReplicaSettingsUpdate []*ReplicaSettingsUpdate `min:"1" type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateGlobalTableSettingsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateGlobalTableSettingsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
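A minimal sketch of the UpdateGlobalTableSettings call whose input is documented above, switching a hypothetical global table to on-demand billing.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.UpdateGlobalTableSettings(&dynamodb.UpdateGlobalTableSettingsInput{
		GlobalTableName:        aws.String("Orders"),
		GlobalTableBillingMode: aws.String(dynamodb.BillingModePayPerRequest),
	})
	if err != nil {
		fmt.Println("update failed:", err)
		return
	}
	// The response carries the per-Region settings after the change.
	for _, rs := range out.ReplicaSettings {
		fmt.Println(aws.StringValue(rs.RegionName), aws.StringValue(rs.ReplicaStatus))
	}
}
```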
-func (s *UpdateGlobalTableSettingsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateGlobalTableSettingsInput"} - if s.GlobalTableGlobalSecondaryIndexSettingsUpdate != nil && len(s.GlobalTableGlobalSecondaryIndexSettingsUpdate) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GlobalTableGlobalSecondaryIndexSettingsUpdate", 1)) - } - if s.GlobalTableName == nil { - invalidParams.Add(request.NewErrParamRequired("GlobalTableName")) - } - if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3)) - } - if s.GlobalTableProvisionedWriteCapacityUnits != nil && *s.GlobalTableProvisionedWriteCapacityUnits < 1 { - invalidParams.Add(request.NewErrParamMinValue("GlobalTableProvisionedWriteCapacityUnits", 1)) - } - if s.ReplicaSettingsUpdate != nil && len(s.ReplicaSettingsUpdate) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ReplicaSettingsUpdate", 1)) - } - if s.GlobalTableGlobalSecondaryIndexSettingsUpdate != nil { - for i, v := range s.GlobalTableGlobalSecondaryIndexSettingsUpdate { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalTableGlobalSecondaryIndexSettingsUpdate", i), err.(request.ErrInvalidParams)) - } - } - } - if s.GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate != nil { - if err := s.GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate.Validate(); err != nil { - invalidParams.AddNested("GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate", err.(request.ErrInvalidParams)) - } - } - if s.ReplicaSettingsUpdate != nil { - for i, v := range s.ReplicaSettingsUpdate { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaSettingsUpdate", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGlobalTableBillingMode sets the GlobalTableBillingMode field's value. -func (s *UpdateGlobalTableSettingsInput) SetGlobalTableBillingMode(v string) *UpdateGlobalTableSettingsInput { - s.GlobalTableBillingMode = &v - return s -} - -// SetGlobalTableGlobalSecondaryIndexSettingsUpdate sets the GlobalTableGlobalSecondaryIndexSettingsUpdate field's value. -func (s *UpdateGlobalTableSettingsInput) SetGlobalTableGlobalSecondaryIndexSettingsUpdate(v []*GlobalTableGlobalSecondaryIndexSettingsUpdate) *UpdateGlobalTableSettingsInput { - s.GlobalTableGlobalSecondaryIndexSettingsUpdate = v - return s -} - -// SetGlobalTableName sets the GlobalTableName field's value. -func (s *UpdateGlobalTableSettingsInput) SetGlobalTableName(v string) *UpdateGlobalTableSettingsInput { - s.GlobalTableName = &v - return s -} - -// SetGlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate sets the GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate field's value. -func (s *UpdateGlobalTableSettingsInput) SetGlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate(v *AutoScalingSettingsUpdate) *UpdateGlobalTableSettingsInput { - s.GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate = v - return s -} - -// SetGlobalTableProvisionedWriteCapacityUnits sets the GlobalTableProvisionedWriteCapacityUnits field's value. 
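For the UpdateGlobalTable operation documented a little earlier, a sketch of adding a replica Region via ReplicaUpdates; the table name and Region are hypothetical.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// Each ReplicaUpdate either creates or deletes a replica Region.
	out, err := svc.UpdateGlobalTable(&dynamodb.UpdateGlobalTableInput{
		GlobalTableName: aws.String("Orders"),
		ReplicaUpdates: []*dynamodb.ReplicaUpdate{
			{Create: &dynamodb.CreateReplicaAction{RegionName: aws.String("us-west-2")}},
		},
	})
	if err != nil {
		fmt.Println("update failed:", err)
		return
	}
	fmt.Println(out.GlobalTableDescription)
}
```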
-func (s *UpdateGlobalTableSettingsInput) SetGlobalTableProvisionedWriteCapacityUnits(v int64) *UpdateGlobalTableSettingsInput { - s.GlobalTableProvisionedWriteCapacityUnits = &v - return s -} - -// SetReplicaSettingsUpdate sets the ReplicaSettingsUpdate field's value. -func (s *UpdateGlobalTableSettingsInput) SetReplicaSettingsUpdate(v []*ReplicaSettingsUpdate) *UpdateGlobalTableSettingsInput { - s.ReplicaSettingsUpdate = v - return s -} - -type UpdateGlobalTableSettingsOutput struct { - _ struct{} `type:"structure"` - - // The name of the global table. - GlobalTableName *string `min:"3" type:"string"` - - // The Region-specific settings for the global table. - ReplicaSettings []*ReplicaSettingsDescription `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateGlobalTableSettingsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateGlobalTableSettingsOutput) GoString() string { - return s.String() -} - -// SetGlobalTableName sets the GlobalTableName field's value. -func (s *UpdateGlobalTableSettingsOutput) SetGlobalTableName(v string) *UpdateGlobalTableSettingsOutput { - s.GlobalTableName = &v - return s -} - -// SetReplicaSettings sets the ReplicaSettings field's value. -func (s *UpdateGlobalTableSettingsOutput) SetReplicaSettings(v []*ReplicaSettingsDescription) *UpdateGlobalTableSettingsOutput { - s.ReplicaSettings = v - return s -} - -// Represents the input of an UpdateItem operation. -type UpdateItemInput struct { - _ struct{} `type:"structure"` - - // This is a legacy parameter. Use UpdateExpression instead. For more information, - // see AttributeUpdates (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributeUpdates.html) - // in the Amazon DynamoDB Developer Guide. - AttributeUpdates map[string]*AttributeValueUpdate `type:"map"` - - // A condition that must be satisfied in order for a conditional update to succeed. - // - // An expression can contain any of the following: - // - // * Functions: attribute_exists | attribute_not_exists | attribute_type - // | contains | begins_with | size These function names are case-sensitive. - // - // * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN - // - // * Logical operators: AND | OR | NOT - // - // For more information about condition expressions, see Specifying Conditions - // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) - // in the Amazon DynamoDB Developer Guide. - ConditionExpression *string `type:"string"` - - // This is a legacy parameter. Use ConditionExpression instead. For more information, - // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) - // in the Amazon DynamoDB Developer Guide. - ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` - - // This is a legacy parameter. Use ConditionExpression instead. 
For more information, - // see Expected (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html) - // in the Amazon DynamoDB Developer Guide. - Expected map[string]*ExpectedAttributeValue `type:"map"` - - // One or more substitution tokens for attribute names in an expression. The - // following are some use cases for using ExpressionAttributeNames: - // - // * To access an attribute whose name conflicts with a DynamoDB reserved - // word. - // - // * To create a placeholder for repeating occurrences of an attribute name - // in an expression. - // - // * To prevent special characters in an attribute name from being misinterpreted - // in an expression. - // - // Use the # character in an expression to dereference an attribute name. For - // example, consider the following attribute name: - // - // * Percentile - // - // The name of this attribute conflicts with a reserved word, so it cannot be - // used directly in an expression. (For the complete list of reserved words, - // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) - // in the Amazon DynamoDB Developer Guide.) To work around this, you could specify - // the following for ExpressionAttributeNames: - // - // * {"#P":"Percentile"} - // - // You could then use this substitution in an expression, as in this example: - // - // * #P = :val - // - // Tokens that begin with the : character are expression attribute values, which - // are placeholders for the actual value at runtime. - // - // For more information about expression attribute names, see Specifying Item - // Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. - ExpressionAttributeNames map[string]*string `type:"map"` - - // One or more values that can be substituted in an expression. - // - // Use the : (colon) character in an expression to dereference an attribute - // value. For example, suppose that you wanted to check whether the value of - // the ProductStatus attribute was one of the following: - // - // Available | Backordered | Discontinued - // - // You would first need to specify ExpressionAttributeValues as follows: - // - // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} - // } - // - // You could then use these values in an expression, such as this: - // - // ProductStatus IN (:avail, :back, :disc) - // - // For more information on expression attribute values, see Condition Expressions - // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) - // in the Amazon DynamoDB Developer Guide. - ExpressionAttributeValues map[string]*AttributeValue `type:"map"` - - // The primary key of the item to be updated. Each element consists of an attribute - // name and a value for that attribute. - // - // For the primary key, you must provide all of the attributes. For example, - // with a simple primary key, you only need to provide a value for the partition - // key. For a composite primary key, you must provide values for both the partition - // key and the sort key. 
-	//
-	// Key is a required field
-	Key map[string]*AttributeValue `type:"map" required:"true"`
-
-	// Determines the level of detail about either provisioned or on-demand throughput
-	// consumption that is returned in the response:
-	//
-	// * INDEXES - The response includes the aggregate ConsumedCapacity for the
-	// operation, together with ConsumedCapacity for each table and secondary
-	// index that was accessed. Note that some operations, such as GetItem and
-	// BatchGetItem, do not access any indexes at all. In these cases, specifying
-	// INDEXES will only return ConsumedCapacity information for table(s).
-	//
-	// * TOTAL - The response includes only the aggregate ConsumedCapacity for
-	// the operation.
-	//
-	// * NONE - No ConsumedCapacity details are included in the response.
-	ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
-
-	// Determines whether item collection metrics are returned. If set to SIZE,
-	// the response includes statistics about item collections, if any, that were
-	// modified during the operation. If set to NONE (the default), no statistics
-	// are returned.
-	ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"`
-
-	// Use ReturnValues if you want to get the item attributes as they appear before
-	// or after they are successfully updated. For UpdateItem, the valid values
-	// are:
-	//
-	// * NONE - If ReturnValues is not specified, or if its value is NONE, then
-	// nothing is returned. (This setting is the default for ReturnValues.)
-	//
-	// * ALL_OLD - Returns all of the attributes of the item, as they appeared
-	// before the UpdateItem operation.
-	//
-	// * UPDATED_OLD - Returns only the updated attributes, as they appeared
-	// before the UpdateItem operation.
-	//
-	// * ALL_NEW - Returns all of the attributes of the item, as they appear
-	// after the UpdateItem operation.
-	//
-	// * UPDATED_NEW - Returns only the updated attributes, as they appear after
-	// the UpdateItem operation.
-	//
-	// There is no additional cost associated with requesting a return value aside
-	// from the small network and processing overhead of receiving a larger response.
-	// No read capacity units are consumed.
-	//
-	// The values returned are strongly consistent.
-	ReturnValues *string `type:"string" enum:"ReturnValue"`
-
-	// An optional parameter that returns the item attributes for an UpdateItem
-	// operation that failed a condition check.
-	//
-	// There is no additional cost associated with requesting a return value aside
-	// from the small network and processing overhead of receiving a larger response.
-	// No read capacity units are consumed.
-	ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"`
-
-	// The name of the table containing the item to update. You can also provide
-	// the Amazon Resource Name (ARN) of the table in this parameter.
-	//
-	// TableName is a required field
-	TableName *string `min:"1" type:"string" required:"true"`
-
-	// An expression that defines one or more attributes to be updated, the action
-	// to be performed on them, and new values for them.
-	//
-	// The following action values are available for UpdateExpression.
-	//
-	// * SET - Adds one or more attributes and values to an item. If any of these
-	// attributes already exist, they are replaced by the new values. You can
-	// also use SET to add or subtract from an attribute that is of type Number.
- // For example: SET myNum = myNum + :val SET supports the following functions: - // if_not_exists (path, operand) - if the item does not contain an attribute - // at the specified path, then if_not_exists evaluates to operand; otherwise, - // it evaluates to path. You can use this function to avoid overwriting an - // attribute that may already be present in the item. list_append (operand, - // operand) - evaluates to a list with a new element added to it. You can - // append the new element to the start or the end of the list by reversing - // the order of the operands. These function names are case-sensitive. - // - // * REMOVE - Removes one or more attributes from an item. - // - // * ADD - Adds the specified value to the item, if the attribute does not - // already exist. If the attribute does exist, then the behavior of ADD depends - // on the data type of the attribute: If the existing attribute is a number, - // and if Value is also a number, then Value is mathematically added to the - // existing attribute. If Value is a negative number, then it is subtracted - // from the existing attribute. If you use ADD to increment or decrement - // a number value for an item that doesn't exist before the update, DynamoDB - // uses 0 as the initial value. Similarly, if you use ADD for an existing - // item to increment or decrement an attribute value that doesn't exist before - // the update, DynamoDB uses 0 as the initial value. For example, suppose - // that the item you want to update doesn't have an attribute named itemcount, - // but you decide to ADD the number 3 to this attribute anyway. DynamoDB - // will create the itemcount attribute, set its initial value to 0, and finally - // add 3 to it. The result will be a new itemcount attribute in the item, - // with a value of 3. If the existing data type is a set and if Value is - // also a set, then Value is added to the existing set. For example, if the - // attribute value is the set [1,2], and the ADD action specified [3], then - // the final attribute value is [1,2,3]. An error occurs if an ADD action - // is specified for a set attribute and the attribute type specified does - // not match the existing set type. Both sets must have the same primitive - // data type. For example, if the existing data type is a set of strings, - // the Value must also be a set of strings. The ADD action only supports - // Number and set data types. In addition, ADD can only be used on top-level - // attributes, not nested attributes. - // - // * DELETE - Deletes an element from a set. If a set of values is specified, - // then those values are subtracted from the old set. For example, if the - // attribute value was the set [a,b,c] and the DELETE action specifies [a,c], - // then the final attribute value is [b]. Specifying an empty set is an error. - // The DELETE action only supports set data types. In addition, DELETE can - // only be used on top-level attributes, not nested attributes. - // - // You can have many actions in a single expression, such as the following: - // SET a=:value1, b=:value2 DELETE :value3, :value4, :value5 - // - // For more information on update expressions, see Modifying Items and Attributes - // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.Modifying.html) - // in the Amazon DynamoDB Developer Guide. - UpdateExpression *string `type:"string"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateItemInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateItemInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateItemInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateItemInput"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAttributeUpdates sets the AttributeUpdates field's value. -func (s *UpdateItemInput) SetAttributeUpdates(v map[string]*AttributeValueUpdate) *UpdateItemInput { - s.AttributeUpdates = v - return s -} - -// SetConditionExpression sets the ConditionExpression field's value. -func (s *UpdateItemInput) SetConditionExpression(v string) *UpdateItemInput { - s.ConditionExpression = &v - return s -} - -// SetConditionalOperator sets the ConditionalOperator field's value. -func (s *UpdateItemInput) SetConditionalOperator(v string) *UpdateItemInput { - s.ConditionalOperator = &v - return s -} - -// SetExpected sets the Expected field's value. -func (s *UpdateItemInput) SetExpected(v map[string]*ExpectedAttributeValue) *UpdateItemInput { - s.Expected = v - return s -} - -// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. -func (s *UpdateItemInput) SetExpressionAttributeNames(v map[string]*string) *UpdateItemInput { - s.ExpressionAttributeNames = v - return s -} - -// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. -func (s *UpdateItemInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *UpdateItemInput { - s.ExpressionAttributeValues = v - return s -} - -// SetKey sets the Key field's value. -func (s *UpdateItemInput) SetKey(v map[string]*AttributeValue) *UpdateItemInput { - s.Key = v - return s -} - -// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. -func (s *UpdateItemInput) SetReturnConsumedCapacity(v string) *UpdateItemInput { - s.ReturnConsumedCapacity = &v - return s -} - -// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value. -func (s *UpdateItemInput) SetReturnItemCollectionMetrics(v string) *UpdateItemInput { - s.ReturnItemCollectionMetrics = &v - return s -} - -// SetReturnValues sets the ReturnValues field's value. -func (s *UpdateItemInput) SetReturnValues(v string) *UpdateItemInput { - s.ReturnValues = &v - return s -} - -// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. -func (s *UpdateItemInput) SetReturnValuesOnConditionCheckFailure(v string) *UpdateItemInput { - s.ReturnValuesOnConditionCheckFailure = &v - return s -} - -// SetTableName sets the TableName field's value. 
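Tying the UpdateItemInput fields above together, a sketch of an UpdateItem call that uses an expression attribute name (#P for the reserved word Percentile, as in the documentation above), expression attribute values, a condition expression, and ReturnValues; the table, key, and values are hypothetical.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.UpdateItem(&dynamodb.UpdateItemInput{
		TableName: aws.String("Products"),
		Key: map[string]*dynamodb.AttributeValue{
			"ProductId": {S: aws.String("p-100")},
		},
		// #P dereferences the reserved attribute name; :val and :disc are
		// expression attribute values, as described above.
		UpdateExpression:    aws.String("SET #P = :val"),
		ConditionExpression: aws.String("ProductStatus <> :disc"),
		ExpressionAttributeNames: map[string]*string{
			"#P": aws.String("Percentile"),
		},
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
			":val":  {N: aws.String("99")},
			":disc": {S: aws.String("Discontinued")},
		},
		ReturnValues: aws.String(dynamodb.ReturnValueUpdatedNew),
	})
	if err != nil {
		fmt.Println("update failed:", err)
		return
	}
	fmt.Println(out.Attributes)
}
```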
-func (s *UpdateItemInput) SetTableName(v string) *UpdateItemInput { - s.TableName = &v - return s -} - -// SetUpdateExpression sets the UpdateExpression field's value. -func (s *UpdateItemInput) SetUpdateExpression(v string) *UpdateItemInput { - s.UpdateExpression = &v - return s -} - -// Represents the output of an UpdateItem operation. -type UpdateItemOutput struct { - _ struct{} `type:"structure"` - - // A map of attribute values as they appear before or after the UpdateItem operation, - // as determined by the ReturnValues parameter. - // - // The Attributes map is only present if the update was successful and ReturnValues - // was specified as something other than NONE in the request. Each element represents - // one attribute. - Attributes map[string]*AttributeValue `type:"map"` - - // The capacity units consumed by the UpdateItem operation. The data returned - // includes the total provisioned throughput consumed, along with statistics - // for the table and any indexes involved in the operation. ConsumedCapacity - // is only returned if the ReturnConsumedCapacity parameter was specified. For - // more information, see Capacity unit consumption for write operations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#write-operation-consumption) - // in the Amazon DynamoDB Developer Guide. - ConsumedCapacity *ConsumedCapacity `type:"structure"` - - // Information about item collections, if any, that were affected by the UpdateItem - // operation. ItemCollectionMetrics is only returned if the ReturnItemCollectionMetrics - // parameter was specified. If the table does not have any local secondary indexes, - // this information is not returned in the response. - // - // Each ItemCollectionMetrics element consists of: - // - // * ItemCollectionKey - The partition key value of the item collection. - // This is the same as the partition key value of the item itself. - // - // * SizeEstimateRangeGB - An estimate of item collection size, in gigabytes. - // This value is a two-element array containing a lower bound and an upper - // bound for the estimate. The estimate includes the size of all the items - // in the table, plus the size of all attributes projected into all of the - // local secondary indexes on that table. Use this estimate to measure whether - // a local secondary index is approaching its size limit. The estimate is - // subject to change over time; therefore, do not rely on the precision or - // accuracy of the estimate. - ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateItemOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateItemOutput) GoString() string { - return s.String() -} - -// SetAttributes sets the Attributes field's value. -func (s *UpdateItemOutput) SetAttributes(v map[string]*AttributeValue) *UpdateItemOutput { - s.Attributes = v - return s -} - -// SetConsumedCapacity sets the ConsumedCapacity field's value.
-func (s *UpdateItemOutput) SetConsumedCapacity(v *ConsumedCapacity) *UpdateItemOutput { - s.ConsumedCapacity = v - return s -} - -// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value. -func (s *UpdateItemOutput) SetItemCollectionMetrics(v *ItemCollectionMetrics) *UpdateItemOutput { - s.ItemCollectionMetrics = v - return s -} - -// Enables updating the configuration for Kinesis Streaming. -type UpdateKinesisStreamingConfiguration struct { - _ struct{} `type:"structure"` - - // Enables updating the precision of Kinesis data stream timestamp. - ApproximateCreationDateTimePrecision *string `type:"string" enum:"ApproximateCreationDateTimePrecision"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateKinesisStreamingConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateKinesisStreamingConfiguration) GoString() string { - return s.String() -} - -// SetApproximateCreationDateTimePrecision sets the ApproximateCreationDateTimePrecision field's value. -func (s *UpdateKinesisStreamingConfiguration) SetApproximateCreationDateTimePrecision(v string) *UpdateKinesisStreamingConfiguration { - s.ApproximateCreationDateTimePrecision = &v - return s -} - -type UpdateKinesisStreamingDestinationInput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) for the Kinesis stream input. - // - // StreamArn is a required field - StreamArn *string `min:"37" type:"string" required:"true"` - - // The table name for the Kinesis streaming destination input. You can also - // provide the ARN of the table in this parameter. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` - - // The command to update the Kinesis stream configuration. - UpdateKinesisStreamingConfiguration *UpdateKinesisStreamingConfiguration `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateKinesisStreamingDestinationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateKinesisStreamingDestinationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateKinesisStreamingDestinationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateKinesisStreamingDestinationInput"} - if s.StreamArn == nil { - invalidParams.Add(request.NewErrParamRequired("StreamArn")) - } - if s.StreamArn != nil && len(*s.StreamArn) < 37 { - invalidParams.Add(request.NewErrParamMinLen("StreamArn", 37)) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetStreamArn sets the StreamArn field's value. -func (s *UpdateKinesisStreamingDestinationInput) SetStreamArn(v string) *UpdateKinesisStreamingDestinationInput { - s.StreamArn = &v - return s -} - -// SetTableName sets the TableName field's value. -func (s *UpdateKinesisStreamingDestinationInput) SetTableName(v string) *UpdateKinesisStreamingDestinationInput { - s.TableName = &v - return s -} - -// SetUpdateKinesisStreamingConfiguration sets the UpdateKinesisStreamingConfiguration field's value. -func (s *UpdateKinesisStreamingDestinationInput) SetUpdateKinesisStreamingConfiguration(v *UpdateKinesisStreamingConfiguration) *UpdateKinesisStreamingDestinationInput { - s.UpdateKinesisStreamingConfiguration = v - return s -} - -type UpdateKinesisStreamingDestinationOutput struct { - _ struct{} `type:"structure"` - - // The status of the attempt to update the Kinesis streaming destination output. - DestinationStatus *string `type:"string" enum:"DestinationStatus"` - - // The ARN for the Kinesis stream input. - StreamArn *string `min:"37" type:"string"` - - // The table name for the Kinesis streaming destination output. - TableName *string `min:"3" type:"string"` - - // The command to update the Kinesis streaming destination configuration. - UpdateKinesisStreamingConfiguration *UpdateKinesisStreamingConfiguration `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateKinesisStreamingDestinationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateKinesisStreamingDestinationOutput) GoString() string { - return s.String() -} - -// SetDestinationStatus sets the DestinationStatus field's value. -func (s *UpdateKinesisStreamingDestinationOutput) SetDestinationStatus(v string) *UpdateKinesisStreamingDestinationOutput { - s.DestinationStatus = &v - return s -} - -// SetStreamArn sets the StreamArn field's value. -func (s *UpdateKinesisStreamingDestinationOutput) SetStreamArn(v string) *UpdateKinesisStreamingDestinationOutput { - s.StreamArn = &v - return s -} - -// SetTableName sets the TableName field's value. -func (s *UpdateKinesisStreamingDestinationOutput) SetTableName(v string) *UpdateKinesisStreamingDestinationOutput { - s.TableName = &v - return s -} - -// SetUpdateKinesisStreamingConfiguration sets the UpdateKinesisStreamingConfiguration field's value. 
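As a hedged usage sketch for the Kinesis streaming types above, switching an existing destination to microsecond timestamp precision looks roughly like this; the table name and stream ARN parameters are placeholders supplied by the caller:

	package example

	import (
		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/service/dynamodb"
	)

	func setMicrosecondPrecision(svc *dynamodb.DynamoDB, table, streamARN string) error {
		in := &dynamodb.UpdateKinesisStreamingDestinationInput{
			TableName: aws.String(table),
			StreamArn: aws.String(streamARN),
			UpdateKinesisStreamingConfiguration: &dynamodb.UpdateKinesisStreamingConfiguration{
				ApproximateCreationDateTimePrecision: aws.String(dynamodb.ApproximateCreationDateTimePrecisionMicrosecond),
			},
		}
		// Run the client-side checks shown above (ARN length, table name)
		// before making the network call.
		if err := in.Validate(); err != nil {
			return err
		}
		_, err := svc.UpdateKinesisStreamingDestination(in)
		return err
	}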
-func (s *UpdateKinesisStreamingDestinationOutput) SetUpdateKinesisStreamingConfiguration(v *UpdateKinesisStreamingConfiguration) *UpdateKinesisStreamingDestinationOutput { - s.UpdateKinesisStreamingConfiguration = v - return s -} - -// Represents a replica to be modified. -type UpdateReplicationGroupMemberAction struct { - _ struct{} `type:"structure"` - - // Replica-specific global secondary index settings. - GlobalSecondaryIndexes []*ReplicaGlobalSecondaryIndex `min:"1" type:"list"` - - // The KMS key of the replica that should be used for KMS encryption. To specify - // a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. - // Note that you should only provide this parameter if the key is different - // from the default DynamoDB KMS key alias/aws/dynamodb. - KMSMasterKeyId *string `type:"string"` - - // Overrides the maximum on-demand throughput for the replica table. - OnDemandThroughputOverride *OnDemandThroughputOverride `type:"structure"` - - // Replica-specific provisioned throughput. If not specified, uses the source - // table's provisioned throughput settings. - ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"` - - // The Region where the replica exists. - // - // RegionName is a required field - RegionName *string `type:"string" required:"true"` - - // Replica-specific table class. If not specified, uses the source table's table - // class. - TableClassOverride *string `type:"string" enum:"TableClass"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateReplicationGroupMemberAction) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateReplicationGroupMemberAction) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateReplicationGroupMemberAction) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateReplicationGroupMemberAction"} - if s.GlobalSecondaryIndexes != nil && len(s.GlobalSecondaryIndexes) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GlobalSecondaryIndexes", 1)) - } - if s.RegionName == nil { - invalidParams.Add(request.NewErrParamRequired("RegionName")) - } - if s.GlobalSecondaryIndexes != nil { - for i, v := range s.GlobalSecondaryIndexes { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexes", i), err.(request.ErrInvalidParams)) - } - } - } - if s.ProvisionedThroughputOverride != nil { - if err := s.ProvisionedThroughputOverride.Validate(); err != nil { - invalidParams.AddNested("ProvisionedThroughputOverride", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. 
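Building on UpdateReplicationGroupMemberAction, here is a sketch of tuning a single replica of a version 2019.11.21 global table through UpdateTable; the Region and the table-class choice are assumptions for illustration:

	package example

	import (
		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/service/dynamodb"
	)

	// downgradeReplicaClass moves one replica to the infrequent-access table
	// class while leaving the source table's class untouched.
	func downgradeReplicaClass(svc *dynamodb.DynamoDB, table string) error {
		_, err := svc.UpdateTable(&dynamodb.UpdateTableInput{
			TableName: aws.String(table),
			ReplicaUpdates: []*dynamodb.ReplicationGroupUpdate{{
				Update: &dynamodb.UpdateReplicationGroupMemberAction{
					RegionName:         aws.String("eu-west-1"), // hypothetical replica Region
					TableClassOverride: aws.String(dynamodb.TableClassStandardInfrequentAccess),
				},
			}},
		})
		return err
	}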
-func (s *UpdateReplicationGroupMemberAction) SetGlobalSecondaryIndexes(v []*ReplicaGlobalSecondaryIndex) *UpdateReplicationGroupMemberAction { - s.GlobalSecondaryIndexes = v - return s -} - -// SetKMSMasterKeyId sets the KMSMasterKeyId field's value. -func (s *UpdateReplicationGroupMemberAction) SetKMSMasterKeyId(v string) *UpdateReplicationGroupMemberAction { - s.KMSMasterKeyId = &v - return s -} - -// SetOnDemandThroughputOverride sets the OnDemandThroughputOverride field's value. -func (s *UpdateReplicationGroupMemberAction) SetOnDemandThroughputOverride(v *OnDemandThroughputOverride) *UpdateReplicationGroupMemberAction { - s.OnDemandThroughputOverride = v - return s -} - -// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. -func (s *UpdateReplicationGroupMemberAction) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *UpdateReplicationGroupMemberAction { - s.ProvisionedThroughputOverride = v - return s -} - -// SetRegionName sets the RegionName field's value. -func (s *UpdateReplicationGroupMemberAction) SetRegionName(v string) *UpdateReplicationGroupMemberAction { - s.RegionName = &v - return s -} - -// SetTableClassOverride sets the TableClassOverride field's value. -func (s *UpdateReplicationGroupMemberAction) SetTableClassOverride(v string) *UpdateReplicationGroupMemberAction { - s.TableClassOverride = &v - return s -} - -// Represents the input of an UpdateTable operation. -type UpdateTableInput struct { - _ struct{} `type:"structure"` - - // An array of attributes that describe the key schema for the table and indexes. - // If you are adding a new global secondary index to the table, AttributeDefinitions - // must include the key element(s) of the new index. - AttributeDefinitions []*AttributeDefinition `type:"list"` - - // Controls how you are charged for read and write throughput and how you manage - // capacity. When switching from pay-per-request to provisioned capacity, initial - // provisioned capacity values must be set. The initial provisioned capacity - // values are estimated based on the consumed read and write capacity of your - // table and global secondary indexes over the past 30 minutes. - // - // * PROVISIONED - We recommend using PROVISIONED for predictable workloads. - // PROVISIONED sets the billing mode to Provisioned capacity mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html). - // - // * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable - // workloads. PAY_PER_REQUEST sets the billing mode to On-demand capacity - // mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/on-demand-capacity-mode.html). - BillingMode *string `type:"string" enum:"BillingMode"` - - // Indicates whether deletion protection is to be enabled (true) or disabled - // (false) on the table. - DeletionProtectionEnabled *bool `type:"boolean"` - - // An array of one or more global secondary indexes for the table. For each - // index in the array, you can request one action: - // - // * Create - add a new global secondary index to the table. - // - // * Update - modify the provisioned throughput settings of an existing global - // secondary index. - // - // * Delete - remove a global secondary index from the table. - // - // You can create or delete only one global secondary index per UpdateTable - // operation. 
- // - // For more information, see Managing Global Secondary Indexes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GSI.OnlineOps.html) - // in the Amazon DynamoDB Developer Guide. - GlobalSecondaryIndexUpdates []*GlobalSecondaryIndexUpdate `type:"list"` - - // Updates the maximum number of read and write units for the specified table - // in on-demand capacity mode. If you use this parameter, you must specify MaxReadRequestUnits, - // MaxWriteRequestUnits, or both. - OnDemandThroughput *OnDemandThroughput `type:"structure"` - - // The new provisioned throughput settings for the specified table or index. - ProvisionedThroughput *ProvisionedThroughput `type:"structure"` - - // A list of replica update actions (create, delete, or update) for the table. - // - // For global tables, this property only applies to global tables using Version - // 2019.11.21 (Current version). - ReplicaUpdates []*ReplicationGroupUpdate `min:"1" type:"list"` - - // The new server-side encryption settings for the specified table. - SSESpecification *SSESpecification `type:"structure"` - - // Represents the DynamoDB Streams configuration for the table. - // - // You receive a ValidationException if you try to enable a stream on a table - // that already has a stream, or if you try to disable a stream on a table that - // doesn't have a stream. - StreamSpecification *StreamSpecification `type:"structure"` - - // The table class of the table to be updated. Valid values are STANDARD and - // STANDARD_INFREQUENT_ACCESS. - TableClass *string `type:"string" enum:"TableClass"` - - // The name of the table to be updated. You can also provide the Amazon Resource - // Name (ARN) of the table in this parameter. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateTableInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateTableInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateTableInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateTableInput"} - if s.ReplicaUpdates != nil && len(s.ReplicaUpdates) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ReplicaUpdates", 1)) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - if s.AttributeDefinitions != nil { - for i, v := range s.AttributeDefinitions { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AttributeDefinitions", i), err.(request.ErrInvalidParams)) - } - } - } - if s.GlobalSecondaryIndexUpdates != nil { - for i, v := range s.GlobalSecondaryIndexUpdates { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexUpdates", i), err.(request.ErrInvalidParams)) - } - } - } - if s.ProvisionedThroughput != nil { - if err := s.ProvisionedThroughput.Validate(); err != nil { - invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) - } - } - if s.ReplicaUpdates != nil { - for i, v := range s.ReplicaUpdates { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaUpdates", i), err.(request.ErrInvalidParams)) - } - } - } - if s.StreamSpecification != nil { - if err := s.StreamSpecification.Validate(); err != nil { - invalidParams.AddNested("StreamSpecification", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAttributeDefinitions sets the AttributeDefinitions field's value. -func (s *UpdateTableInput) SetAttributeDefinitions(v []*AttributeDefinition) *UpdateTableInput { - s.AttributeDefinitions = v - return s -} - -// SetBillingMode sets the BillingMode field's value. -func (s *UpdateTableInput) SetBillingMode(v string) *UpdateTableInput { - s.BillingMode = &v - return s -} - -// SetDeletionProtectionEnabled sets the DeletionProtectionEnabled field's value. -func (s *UpdateTableInput) SetDeletionProtectionEnabled(v bool) *UpdateTableInput { - s.DeletionProtectionEnabled = &v - return s -} - -// SetGlobalSecondaryIndexUpdates sets the GlobalSecondaryIndexUpdates field's value. -func (s *UpdateTableInput) SetGlobalSecondaryIndexUpdates(v []*GlobalSecondaryIndexUpdate) *UpdateTableInput { - s.GlobalSecondaryIndexUpdates = v - return s -} - -// SetOnDemandThroughput sets the OnDemandThroughput field's value. -func (s *UpdateTableInput) SetOnDemandThroughput(v *OnDemandThroughput) *UpdateTableInput { - s.OnDemandThroughput = v - return s -} - -// SetProvisionedThroughput sets the ProvisionedThroughput field's value. -func (s *UpdateTableInput) SetProvisionedThroughput(v *ProvisionedThroughput) *UpdateTableInput { - s.ProvisionedThroughput = v - return s -} - -// SetReplicaUpdates sets the ReplicaUpdates field's value. -func (s *UpdateTableInput) SetReplicaUpdates(v []*ReplicationGroupUpdate) *UpdateTableInput { - s.ReplicaUpdates = v - return s -} - -// SetSSESpecification sets the SSESpecification field's value. -func (s *UpdateTableInput) SetSSESpecification(v *SSESpecification) *UpdateTableInput { - s.SSESpecification = v - return s -} - -// SetStreamSpecification sets the StreamSpecification field's value. 
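For the BillingMode semantics documented above, a minimal sketch of flipping a table to on-demand capacity; no ProvisionedThroughput is required in that direction, and the helper name is an assumption:

	package example

	import (
		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/service/dynamodb"
	)

	// switchToOnDemand sets the billing mode to PAY_PER_REQUEST. Switching
	// back to PROVISIONED would additionally need initial capacity values,
	// per the UpdateTableInput documentation above.
	func switchToOnDemand(svc *dynamodb.DynamoDB, table string) error {
		_, err := svc.UpdateTable(&dynamodb.UpdateTableInput{
			TableName:   aws.String(table),
			BillingMode: aws.String(dynamodb.BillingModePayPerRequest),
		})
		return err
	}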
-func (s *UpdateTableInput) SetStreamSpecification(v *StreamSpecification) *UpdateTableInput { - s.StreamSpecification = v - return s -} - -// SetTableClass sets the TableClass field's value. -func (s *UpdateTableInput) SetTableClass(v string) *UpdateTableInput { - s.TableClass = &v - return s -} - -// SetTableName sets the TableName field's value. -func (s *UpdateTableInput) SetTableName(v string) *UpdateTableInput { - s.TableName = &v - return s -} - -// Represents the output of an UpdateTable operation. -type UpdateTableOutput struct { - _ struct{} `type:"structure"` - - // Represents the properties of the table. - TableDescription *TableDescription `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateTableOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateTableOutput) GoString() string { - return s.String() -} - -// SetTableDescription sets the TableDescription field's value. -func (s *UpdateTableOutput) SetTableDescription(v *TableDescription) *UpdateTableOutput { - s.TableDescription = v - return s -} - -type UpdateTableReplicaAutoScalingInput struct { - _ struct{} `type:"structure"` - - // Represents the auto scaling settings of the global secondary indexes of the - // replica to be updated. - GlobalSecondaryIndexUpdates []*GlobalSecondaryIndexAutoScalingUpdate `min:"1" type:"list"` - - // Represents the auto scaling settings to be modified for a global table or - // global secondary index. - ProvisionedWriteCapacityAutoScalingUpdate *AutoScalingSettingsUpdate `type:"structure"` - - // Represents the auto scaling settings of replicas of the table that will be - // modified. - ReplicaUpdates []*ReplicaAutoScalingUpdate `min:"1" type:"list"` - - // The name of the global table to be updated. You can also provide the Amazon - // Resource Name (ARN) of the table in this parameter. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateTableReplicaAutoScalingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateTableReplicaAutoScalingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateTableReplicaAutoScalingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateTableReplicaAutoScalingInput"} - if s.GlobalSecondaryIndexUpdates != nil && len(s.GlobalSecondaryIndexUpdates) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GlobalSecondaryIndexUpdates", 1)) - } - if s.ReplicaUpdates != nil && len(s.ReplicaUpdates) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ReplicaUpdates", 1)) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - if s.GlobalSecondaryIndexUpdates != nil { - for i, v := range s.GlobalSecondaryIndexUpdates { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexUpdates", i), err.(request.ErrInvalidParams)) - } - } - } - if s.ProvisionedWriteCapacityAutoScalingUpdate != nil { - if err := s.ProvisionedWriteCapacityAutoScalingUpdate.Validate(); err != nil { - invalidParams.AddNested("ProvisionedWriteCapacityAutoScalingUpdate", err.(request.ErrInvalidParams)) - } - } - if s.ReplicaUpdates != nil { - for i, v := range s.ReplicaUpdates { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaUpdates", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGlobalSecondaryIndexUpdates sets the GlobalSecondaryIndexUpdates field's value. -func (s *UpdateTableReplicaAutoScalingInput) SetGlobalSecondaryIndexUpdates(v []*GlobalSecondaryIndexAutoScalingUpdate) *UpdateTableReplicaAutoScalingInput { - s.GlobalSecondaryIndexUpdates = v - return s -} - -// SetProvisionedWriteCapacityAutoScalingUpdate sets the ProvisionedWriteCapacityAutoScalingUpdate field's value. -func (s *UpdateTableReplicaAutoScalingInput) SetProvisionedWriteCapacityAutoScalingUpdate(v *AutoScalingSettingsUpdate) *UpdateTableReplicaAutoScalingInput { - s.ProvisionedWriteCapacityAutoScalingUpdate = v - return s -} - -// SetReplicaUpdates sets the ReplicaUpdates field's value. -func (s *UpdateTableReplicaAutoScalingInput) SetReplicaUpdates(v []*ReplicaAutoScalingUpdate) *UpdateTableReplicaAutoScalingInput { - s.ReplicaUpdates = v - return s -} - -// SetTableName sets the TableName field's value. -func (s *UpdateTableReplicaAutoScalingInput) SetTableName(v string) *UpdateTableReplicaAutoScalingInput { - s.TableName = &v - return s -} - -type UpdateTableReplicaAutoScalingOutput struct { - _ struct{} `type:"structure"` - - // Returns information about the auto scaling settings of a table with replicas. - TableAutoScalingDescription *TableAutoScalingDescription `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateTableReplicaAutoScalingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s UpdateTableReplicaAutoScalingOutput) GoString() string { - return s.String() -} - -// SetTableAutoScalingDescription sets the TableAutoScalingDescription field's value. -func (s *UpdateTableReplicaAutoScalingOutput) SetTableAutoScalingDescription(v *TableAutoScalingDescription) *UpdateTableReplicaAutoScalingOutput { - s.TableAutoScalingDescription = v - return s -} - -// Represents the input of an UpdateTimeToLive operation. -type UpdateTimeToLiveInput struct { - _ struct{} `type:"structure"` - - // The name of the table to be configured. You can also provide the Amazon Resource - // Name (ARN) of the table in this parameter. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` - - // Represents the settings used to enable or disable Time to Live for the specified - // table. - // - // TimeToLiveSpecification is a required field - TimeToLiveSpecification *TimeToLiveSpecification `type:"structure" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateTimeToLiveInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateTimeToLiveInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateTimeToLiveInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateTimeToLiveInput"} - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - if s.TimeToLiveSpecification == nil { - invalidParams.Add(request.NewErrParamRequired("TimeToLiveSpecification")) - } - if s.TimeToLiveSpecification != nil { - if err := s.TimeToLiveSpecification.Validate(); err != nil { - invalidParams.AddNested("TimeToLiveSpecification", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetTableName sets the TableName field's value. -func (s *UpdateTimeToLiveInput) SetTableName(v string) *UpdateTimeToLiveInput { - s.TableName = &v - return s -} - -// SetTimeToLiveSpecification sets the TimeToLiveSpecification field's value. -func (s *UpdateTimeToLiveInput) SetTimeToLiveSpecification(v *TimeToLiveSpecification) *UpdateTimeToLiveInput { - s.TimeToLiveSpecification = v - return s -} - -type UpdateTimeToLiveOutput struct { - _ struct{} `type:"structure"` - - // Represents the output of an UpdateTimeToLive operation. - TimeToLiveSpecification *TimeToLiveSpecification `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateTimeToLiveOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
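To illustrate the UpdateTimeToLive input shape above, a short sketch of enabling TTL; the epoch-seconds attribute name "expires_at" is a hypothetical choice, not mandated by the API:

	package example

	import (
		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/service/dynamodb"
	)

	// enableTTL turns on Time to Live keyed off a numeric epoch-seconds
	// attribute; items whose value is in the past become eligible for deletion.
	func enableTTL(svc *dynamodb.DynamoDB, table string) error {
		_, err := svc.UpdateTimeToLive(&dynamodb.UpdateTimeToLiveInput{
			TableName: aws.String(table),
			TimeToLiveSpecification: &dynamodb.TimeToLiveSpecification{
				AttributeName: aws.String("expires_at"),
				Enabled:       aws.Bool(true),
			},
		})
		return err
	}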
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateTimeToLiveOutput) GoString() string { - return s.String() -} - -// SetTimeToLiveSpecification sets the TimeToLiveSpecification field's value. -func (s *UpdateTimeToLiveOutput) SetTimeToLiveSpecification(v *TimeToLiveSpecification) *UpdateTimeToLiveOutput { - s.TimeToLiveSpecification = v - return s -} - -// Represents an operation to perform - either DeleteItem or PutItem. You can -// only request one of these operations, not both, in a single WriteRequest. -// If you do need to perform both of these operations, you need to provide two -// separate WriteRequest objects. -type WriteRequest struct { - _ struct{} `type:"structure"` - - // A request to perform a DeleteItem operation. - DeleteRequest *DeleteRequest `type:"structure"` - - // A request to perform a PutItem operation. - PutRequest *PutRequest `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s WriteRequest) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s WriteRequest) GoString() string { - return s.String() -} - -// SetDeleteRequest sets the DeleteRequest field's value. -func (s *WriteRequest) SetDeleteRequest(v *DeleteRequest) *WriteRequest { - s.DeleteRequest = v - return s -} - -// SetPutRequest sets the PutRequest field's value. 
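Since a WriteRequest carries exactly one of PutRequest or DeleteRequest, mixing both operations in a single round trip means supplying separate WriteRequest values to BatchWriteItem, sketched below with assumed item keys:

	package example

	import (
		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/service/dynamodb"
	)

	// batchPutAndDelete issues one put and one delete in a single
	// BatchWriteItem call against the given table.
	func batchPutAndDelete(svc *dynamodb.DynamoDB, table string) error {
		_, err := svc.BatchWriteItem(&dynamodb.BatchWriteItemInput{
			RequestItems: map[string][]*dynamodb.WriteRequest{
				table: {
					{PutRequest: &dynamodb.PutRequest{
						Item: map[string]*dynamodb.AttributeValue{
							"Artist": {S: aws.String("Acme")}, // hypothetical item
						},
					}},
					{DeleteRequest: &dynamodb.DeleteRequest{
						Key: map[string]*dynamodb.AttributeValue{
							"Artist": {S: aws.String("Obsolete")},
						},
					}},
				},
			},
		})
		return err
	}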
-func (s *WriteRequest) SetPutRequest(v *PutRequest) *WriteRequest { - s.PutRequest = v - return s -} - -const ( - // ApproximateCreationDateTimePrecisionMillisecond is a ApproximateCreationDateTimePrecision enum value - ApproximateCreationDateTimePrecisionMillisecond = "MILLISECOND" - - // ApproximateCreationDateTimePrecisionMicrosecond is a ApproximateCreationDateTimePrecision enum value - ApproximateCreationDateTimePrecisionMicrosecond = "MICROSECOND" -) - -// ApproximateCreationDateTimePrecision_Values returns all elements of the ApproximateCreationDateTimePrecision enum -func ApproximateCreationDateTimePrecision_Values() []string { - return []string{ - ApproximateCreationDateTimePrecisionMillisecond, - ApproximateCreationDateTimePrecisionMicrosecond, - } -} - -const ( - // AttributeActionAdd is a AttributeAction enum value - AttributeActionAdd = "ADD" - - // AttributeActionPut is a AttributeAction enum value - AttributeActionPut = "PUT" - - // AttributeActionDelete is a AttributeAction enum value - AttributeActionDelete = "DELETE" -) - -// AttributeAction_Values returns all elements of the AttributeAction enum -func AttributeAction_Values() []string { - return []string{ - AttributeActionAdd, - AttributeActionPut, - AttributeActionDelete, - } -} - -const ( - // BackupStatusCreating is a BackupStatus enum value - BackupStatusCreating = "CREATING" - - // BackupStatusDeleted is a BackupStatus enum value - BackupStatusDeleted = "DELETED" - - // BackupStatusAvailable is a BackupStatus enum value - BackupStatusAvailable = "AVAILABLE" -) - -// BackupStatus_Values returns all elements of the BackupStatus enum -func BackupStatus_Values() []string { - return []string{ - BackupStatusCreating, - BackupStatusDeleted, - BackupStatusAvailable, - } -} - -const ( - // BackupTypeUser is a BackupType enum value - BackupTypeUser = "USER" - - // BackupTypeSystem is a BackupType enum value - BackupTypeSystem = "SYSTEM" - - // BackupTypeAwsBackup is a BackupType enum value - BackupTypeAwsBackup = "AWS_BACKUP" -) - -// BackupType_Values returns all elements of the BackupType enum -func BackupType_Values() []string { - return []string{ - BackupTypeUser, - BackupTypeSystem, - BackupTypeAwsBackup, - } -} - -const ( - // BackupTypeFilterUser is a BackupTypeFilter enum value - BackupTypeFilterUser = "USER" - - // BackupTypeFilterSystem is a BackupTypeFilter enum value - BackupTypeFilterSystem = "SYSTEM" - - // BackupTypeFilterAwsBackup is a BackupTypeFilter enum value - BackupTypeFilterAwsBackup = "AWS_BACKUP" - - // BackupTypeFilterAll is a BackupTypeFilter enum value - BackupTypeFilterAll = "ALL" -) - -// BackupTypeFilter_Values returns all elements of the BackupTypeFilter enum -func BackupTypeFilter_Values() []string { - return []string{ - BackupTypeFilterUser, - BackupTypeFilterSystem, - BackupTypeFilterAwsBackup, - BackupTypeFilterAll, - } -} - -const ( - // BatchStatementErrorCodeEnumConditionalCheckFailed is a BatchStatementErrorCodeEnum enum value - BatchStatementErrorCodeEnumConditionalCheckFailed = "ConditionalCheckFailed" - - // BatchStatementErrorCodeEnumItemCollectionSizeLimitExceeded is a BatchStatementErrorCodeEnum enum value - BatchStatementErrorCodeEnumItemCollectionSizeLimitExceeded = "ItemCollectionSizeLimitExceeded" - - // BatchStatementErrorCodeEnumRequestLimitExceeded is a BatchStatementErrorCodeEnum enum value - BatchStatementErrorCodeEnumRequestLimitExceeded = "RequestLimitExceeded" - - // BatchStatementErrorCodeEnumValidationError is a BatchStatementErrorCodeEnum enum value - 
BatchStatementErrorCodeEnumValidationError = "ValidationError" - - // BatchStatementErrorCodeEnumProvisionedThroughputExceeded is a BatchStatementErrorCodeEnum enum value - BatchStatementErrorCodeEnumProvisionedThroughputExceeded = "ProvisionedThroughputExceeded" - - // BatchStatementErrorCodeEnumTransactionConflict is a BatchStatementErrorCodeEnum enum value - BatchStatementErrorCodeEnumTransactionConflict = "TransactionConflict" - - // BatchStatementErrorCodeEnumThrottlingError is a BatchStatementErrorCodeEnum enum value - BatchStatementErrorCodeEnumThrottlingError = "ThrottlingError" - - // BatchStatementErrorCodeEnumInternalServerError is a BatchStatementErrorCodeEnum enum value - BatchStatementErrorCodeEnumInternalServerError = "InternalServerError" - - // BatchStatementErrorCodeEnumResourceNotFound is a BatchStatementErrorCodeEnum enum value - BatchStatementErrorCodeEnumResourceNotFound = "ResourceNotFound" - - // BatchStatementErrorCodeEnumAccessDenied is a BatchStatementErrorCodeEnum enum value - BatchStatementErrorCodeEnumAccessDenied = "AccessDenied" - - // BatchStatementErrorCodeEnumDuplicateItem is a BatchStatementErrorCodeEnum enum value - BatchStatementErrorCodeEnumDuplicateItem = "DuplicateItem" -) - -// BatchStatementErrorCodeEnum_Values returns all elements of the BatchStatementErrorCodeEnum enum -func BatchStatementErrorCodeEnum_Values() []string { - return []string{ - BatchStatementErrorCodeEnumConditionalCheckFailed, - BatchStatementErrorCodeEnumItemCollectionSizeLimitExceeded, - BatchStatementErrorCodeEnumRequestLimitExceeded, - BatchStatementErrorCodeEnumValidationError, - BatchStatementErrorCodeEnumProvisionedThroughputExceeded, - BatchStatementErrorCodeEnumTransactionConflict, - BatchStatementErrorCodeEnumThrottlingError, - BatchStatementErrorCodeEnumInternalServerError, - BatchStatementErrorCodeEnumResourceNotFound, - BatchStatementErrorCodeEnumAccessDenied, - BatchStatementErrorCodeEnumDuplicateItem, - } -} - -const ( - // BillingModeProvisioned is a BillingMode enum value - BillingModeProvisioned = "PROVISIONED" - - // BillingModePayPerRequest is a BillingMode enum value - BillingModePayPerRequest = "PAY_PER_REQUEST" -) - -// BillingMode_Values returns all elements of the BillingMode enum -func BillingMode_Values() []string { - return []string{ - BillingModeProvisioned, - BillingModePayPerRequest, - } -} - -const ( - // ComparisonOperatorEq is a ComparisonOperator enum value - ComparisonOperatorEq = "EQ" - - // ComparisonOperatorNe is a ComparisonOperator enum value - ComparisonOperatorNe = "NE" - - // ComparisonOperatorIn is a ComparisonOperator enum value - ComparisonOperatorIn = "IN" - - // ComparisonOperatorLe is a ComparisonOperator enum value - ComparisonOperatorLe = "LE" - - // ComparisonOperatorLt is a ComparisonOperator enum value - ComparisonOperatorLt = "LT" - - // ComparisonOperatorGe is a ComparisonOperator enum value - ComparisonOperatorGe = "GE" - - // ComparisonOperatorGt is a ComparisonOperator enum value - ComparisonOperatorGt = "GT" - - // ComparisonOperatorBetween is a ComparisonOperator enum value - ComparisonOperatorBetween = "BETWEEN" - - // ComparisonOperatorNotNull is a ComparisonOperator enum value - ComparisonOperatorNotNull = "NOT_NULL" - - // ComparisonOperatorNull is a ComparisonOperator enum value - ComparisonOperatorNull = "NULL" - - // ComparisonOperatorContains is a ComparisonOperator enum value - ComparisonOperatorContains = "CONTAINS" - - // ComparisonOperatorNotContains is a ComparisonOperator enum value - 
ComparisonOperatorNotContains = "NOT_CONTAINS" - - // ComparisonOperatorBeginsWith is a ComparisonOperator enum value - ComparisonOperatorBeginsWith = "BEGINS_WITH" -) - -// ComparisonOperator_Values returns all elements of the ComparisonOperator enum -func ComparisonOperator_Values() []string { - return []string{ - ComparisonOperatorEq, - ComparisonOperatorNe, - ComparisonOperatorIn, - ComparisonOperatorLe, - ComparisonOperatorLt, - ComparisonOperatorGe, - ComparisonOperatorGt, - ComparisonOperatorBetween, - ComparisonOperatorNotNull, - ComparisonOperatorNull, - ComparisonOperatorContains, - ComparisonOperatorNotContains, - ComparisonOperatorBeginsWith, - } -} - -const ( - // ConditionalOperatorAnd is a ConditionalOperator enum value - ConditionalOperatorAnd = "AND" - - // ConditionalOperatorOr is a ConditionalOperator enum value - ConditionalOperatorOr = "OR" -) - -// ConditionalOperator_Values returns all elements of the ConditionalOperator enum -func ConditionalOperator_Values() []string { - return []string{ - ConditionalOperatorAnd, - ConditionalOperatorOr, - } -} - -const ( - // ContinuousBackupsStatusEnabled is a ContinuousBackupsStatus enum value - ContinuousBackupsStatusEnabled = "ENABLED" - - // ContinuousBackupsStatusDisabled is a ContinuousBackupsStatus enum value - ContinuousBackupsStatusDisabled = "DISABLED" -) - -// ContinuousBackupsStatus_Values returns all elements of the ContinuousBackupsStatus enum -func ContinuousBackupsStatus_Values() []string { - return []string{ - ContinuousBackupsStatusEnabled, - ContinuousBackupsStatusDisabled, - } -} - -const ( - // ContributorInsightsActionEnable is a ContributorInsightsAction enum value - ContributorInsightsActionEnable = "ENABLE" - - // ContributorInsightsActionDisable is a ContributorInsightsAction enum value - ContributorInsightsActionDisable = "DISABLE" -) - -// ContributorInsightsAction_Values returns all elements of the ContributorInsightsAction enum -func ContributorInsightsAction_Values() []string { - return []string{ - ContributorInsightsActionEnable, - ContributorInsightsActionDisable, - } -} - -const ( - // ContributorInsightsStatusEnabling is a ContributorInsightsStatus enum value - ContributorInsightsStatusEnabling = "ENABLING" - - // ContributorInsightsStatusEnabled is a ContributorInsightsStatus enum value - ContributorInsightsStatusEnabled = "ENABLED" - - // ContributorInsightsStatusDisabling is a ContributorInsightsStatus enum value - ContributorInsightsStatusDisabling = "DISABLING" - - // ContributorInsightsStatusDisabled is a ContributorInsightsStatus enum value - ContributorInsightsStatusDisabled = "DISABLED" - - // ContributorInsightsStatusFailed is a ContributorInsightsStatus enum value - ContributorInsightsStatusFailed = "FAILED" -) - -// ContributorInsightsStatus_Values returns all elements of the ContributorInsightsStatus enum -func ContributorInsightsStatus_Values() []string { - return []string{ - ContributorInsightsStatusEnabling, - ContributorInsightsStatusEnabled, - ContributorInsightsStatusDisabling, - ContributorInsightsStatusDisabled, - ContributorInsightsStatusFailed, - } -} - -const ( - // DestinationStatusEnabling is a DestinationStatus enum value - DestinationStatusEnabling = "ENABLING" - - // DestinationStatusActive is a DestinationStatus enum value - DestinationStatusActive = "ACTIVE" - - // DestinationStatusDisabling is a DestinationStatus enum value - DestinationStatusDisabling = "DISABLING" - - // DestinationStatusDisabled is a DestinationStatus enum value - DestinationStatusDisabled = 
"DISABLED" - - // DestinationStatusEnableFailed is a DestinationStatus enum value - DestinationStatusEnableFailed = "ENABLE_FAILED" - - // DestinationStatusUpdating is a DestinationStatus enum value - DestinationStatusUpdating = "UPDATING" -) - -// DestinationStatus_Values returns all elements of the DestinationStatus enum -func DestinationStatus_Values() []string { - return []string{ - DestinationStatusEnabling, - DestinationStatusActive, - DestinationStatusDisabling, - DestinationStatusDisabled, - DestinationStatusEnableFailed, - DestinationStatusUpdating, - } -} - -const ( - // ExportFormatDynamodbJson is a ExportFormat enum value - ExportFormatDynamodbJson = "DYNAMODB_JSON" - - // ExportFormatIon is a ExportFormat enum value - ExportFormatIon = "ION" -) - -// ExportFormat_Values returns all elements of the ExportFormat enum -func ExportFormat_Values() []string { - return []string{ - ExportFormatDynamodbJson, - ExportFormatIon, - } -} - -const ( - // ExportStatusInProgress is a ExportStatus enum value - ExportStatusInProgress = "IN_PROGRESS" - - // ExportStatusCompleted is a ExportStatus enum value - ExportStatusCompleted = "COMPLETED" - - // ExportStatusFailed is a ExportStatus enum value - ExportStatusFailed = "FAILED" -) - -// ExportStatus_Values returns all elements of the ExportStatus enum -func ExportStatus_Values() []string { - return []string{ - ExportStatusInProgress, - ExportStatusCompleted, - ExportStatusFailed, - } -} - -const ( - // ExportTypeFullExport is a ExportType enum value - ExportTypeFullExport = "FULL_EXPORT" - - // ExportTypeIncrementalExport is a ExportType enum value - ExportTypeIncrementalExport = "INCREMENTAL_EXPORT" -) - -// ExportType_Values returns all elements of the ExportType enum -func ExportType_Values() []string { - return []string{ - ExportTypeFullExport, - ExportTypeIncrementalExport, - } -} - -const ( - // ExportViewTypeNewImage is a ExportViewType enum value - ExportViewTypeNewImage = "NEW_IMAGE" - - // ExportViewTypeNewAndOldImages is a ExportViewType enum value - ExportViewTypeNewAndOldImages = "NEW_AND_OLD_IMAGES" -) - -// ExportViewType_Values returns all elements of the ExportViewType enum -func ExportViewType_Values() []string { - return []string{ - ExportViewTypeNewImage, - ExportViewTypeNewAndOldImages, - } -} - -const ( - // GlobalTableStatusCreating is a GlobalTableStatus enum value - GlobalTableStatusCreating = "CREATING" - - // GlobalTableStatusActive is a GlobalTableStatus enum value - GlobalTableStatusActive = "ACTIVE" - - // GlobalTableStatusDeleting is a GlobalTableStatus enum value - GlobalTableStatusDeleting = "DELETING" - - // GlobalTableStatusUpdating is a GlobalTableStatus enum value - GlobalTableStatusUpdating = "UPDATING" -) - -// GlobalTableStatus_Values returns all elements of the GlobalTableStatus enum -func GlobalTableStatus_Values() []string { - return []string{ - GlobalTableStatusCreating, - GlobalTableStatusActive, - GlobalTableStatusDeleting, - GlobalTableStatusUpdating, - } -} - -const ( - // ImportStatusInProgress is a ImportStatus enum value - ImportStatusInProgress = "IN_PROGRESS" - - // ImportStatusCompleted is a ImportStatus enum value - ImportStatusCompleted = "COMPLETED" - - // ImportStatusCancelling is a ImportStatus enum value - ImportStatusCancelling = "CANCELLING" - - // ImportStatusCancelled is a ImportStatus enum value - ImportStatusCancelled = "CANCELLED" - - // ImportStatusFailed is a ImportStatus enum value - ImportStatusFailed = "FAILED" -) - -// ImportStatus_Values returns all elements of the 
ImportStatus enum -func ImportStatus_Values() []string { - return []string{ - ImportStatusInProgress, - ImportStatusCompleted, - ImportStatusCancelling, - ImportStatusCancelled, - ImportStatusFailed, - } -} - -const ( - // IndexStatusCreating is a IndexStatus enum value - IndexStatusCreating = "CREATING" - - // IndexStatusUpdating is a IndexStatus enum value - IndexStatusUpdating = "UPDATING" - - // IndexStatusDeleting is a IndexStatus enum value - IndexStatusDeleting = "DELETING" - - // IndexStatusActive is a IndexStatus enum value - IndexStatusActive = "ACTIVE" -) - -// IndexStatus_Values returns all elements of the IndexStatus enum -func IndexStatus_Values() []string { - return []string{ - IndexStatusCreating, - IndexStatusUpdating, - IndexStatusDeleting, - IndexStatusActive, - } -} - -const ( - // InputCompressionTypeGzip is a InputCompressionType enum value - InputCompressionTypeGzip = "GZIP" - - // InputCompressionTypeZstd is a InputCompressionType enum value - InputCompressionTypeZstd = "ZSTD" - - // InputCompressionTypeNone is a InputCompressionType enum value - InputCompressionTypeNone = "NONE" -) - -// InputCompressionType_Values returns all elements of the InputCompressionType enum -func InputCompressionType_Values() []string { - return []string{ - InputCompressionTypeGzip, - InputCompressionTypeZstd, - InputCompressionTypeNone, - } -} - -const ( - // InputFormatDynamodbJson is a InputFormat enum value - InputFormatDynamodbJson = "DYNAMODB_JSON" - - // InputFormatIon is a InputFormat enum value - InputFormatIon = "ION" - - // InputFormatCsv is a InputFormat enum value - InputFormatCsv = "CSV" -) - -// InputFormat_Values returns all elements of the InputFormat enum -func InputFormat_Values() []string { - return []string{ - InputFormatDynamodbJson, - InputFormatIon, - InputFormatCsv, - } -} - -const ( - // KeyTypeHash is a KeyType enum value - KeyTypeHash = "HASH" - - // KeyTypeRange is a KeyType enum value - KeyTypeRange = "RANGE" -) - -// KeyType_Values returns all elements of the KeyType enum -func KeyType_Values() []string { - return []string{ - KeyTypeHash, - KeyTypeRange, - } -} - -const ( - // PointInTimeRecoveryStatusEnabled is a PointInTimeRecoveryStatus enum value - PointInTimeRecoveryStatusEnabled = "ENABLED" - - // PointInTimeRecoveryStatusDisabled is a PointInTimeRecoveryStatus enum value - PointInTimeRecoveryStatusDisabled = "DISABLED" -) - -// PointInTimeRecoveryStatus_Values returns all elements of the PointInTimeRecoveryStatus enum -func PointInTimeRecoveryStatus_Values() []string { - return []string{ - PointInTimeRecoveryStatusEnabled, - PointInTimeRecoveryStatusDisabled, - } -} - -const ( - // ProjectionTypeAll is a ProjectionType enum value - ProjectionTypeAll = "ALL" - - // ProjectionTypeKeysOnly is a ProjectionType enum value - ProjectionTypeKeysOnly = "KEYS_ONLY" - - // ProjectionTypeInclude is a ProjectionType enum value - ProjectionTypeInclude = "INCLUDE" -) - -// ProjectionType_Values returns all elements of the ProjectionType enum -func ProjectionType_Values() []string { - return []string{ - ProjectionTypeAll, - ProjectionTypeKeysOnly, - ProjectionTypeInclude, - } -} - -const ( - // ReplicaStatusCreating is a ReplicaStatus enum value - ReplicaStatusCreating = "CREATING" - - // ReplicaStatusCreationFailed is a ReplicaStatus enum value - ReplicaStatusCreationFailed = "CREATION_FAILED" - - // ReplicaStatusUpdating is a ReplicaStatus enum value - ReplicaStatusUpdating = "UPDATING" - - // ReplicaStatusDeleting is a ReplicaStatus enum value - 
ReplicaStatusDeleting = "DELETING" - - // ReplicaStatusActive is a ReplicaStatus enum value - ReplicaStatusActive = "ACTIVE" - - // ReplicaStatusRegionDisabled is a ReplicaStatus enum value - ReplicaStatusRegionDisabled = "REGION_DISABLED" - - // ReplicaStatusInaccessibleEncryptionCredentials is a ReplicaStatus enum value - ReplicaStatusInaccessibleEncryptionCredentials = "INACCESSIBLE_ENCRYPTION_CREDENTIALS" -) - -// ReplicaStatus_Values returns all elements of the ReplicaStatus enum -func ReplicaStatus_Values() []string { - return []string{ - ReplicaStatusCreating, - ReplicaStatusCreationFailed, - ReplicaStatusUpdating, - ReplicaStatusDeleting, - ReplicaStatusActive, - ReplicaStatusRegionDisabled, - ReplicaStatusInaccessibleEncryptionCredentials, - } -} - -// Determines the level of detail about either provisioned or on-demand throughput -// consumption that is returned in the response: -// -// - INDEXES - The response includes the aggregate ConsumedCapacity for the -// operation, together with ConsumedCapacity for each table and secondary -// index that was accessed. Note that some operations, such as GetItem and -// BatchGetItem, do not access any indexes at all. In these cases, specifying -// INDEXES will only return ConsumedCapacity information for table(s). -// -// - TOTAL - The response includes only the aggregate ConsumedCapacity for -// the operation. -// -// - NONE - No ConsumedCapacity details are included in the response. -const ( - // ReturnConsumedCapacityIndexes is a ReturnConsumedCapacity enum value - ReturnConsumedCapacityIndexes = "INDEXES" - - // ReturnConsumedCapacityTotal is a ReturnConsumedCapacity enum value - ReturnConsumedCapacityTotal = "TOTAL" - - // ReturnConsumedCapacityNone is a ReturnConsumedCapacity enum value - ReturnConsumedCapacityNone = "NONE" -) - -// ReturnConsumedCapacity_Values returns all elements of the ReturnConsumedCapacity enum -func ReturnConsumedCapacity_Values() []string { - return []string{ - ReturnConsumedCapacityIndexes, - ReturnConsumedCapacityTotal, - ReturnConsumedCapacityNone, - } -} - -const ( - // ReturnItemCollectionMetricsSize is a ReturnItemCollectionMetrics enum value - ReturnItemCollectionMetricsSize = "SIZE" - - // ReturnItemCollectionMetricsNone is a ReturnItemCollectionMetrics enum value - ReturnItemCollectionMetricsNone = "NONE" -) - -// ReturnItemCollectionMetrics_Values returns all elements of the ReturnItemCollectionMetrics enum -func ReturnItemCollectionMetrics_Values() []string { - return []string{ - ReturnItemCollectionMetricsSize, - ReturnItemCollectionMetricsNone, - } -} - -const ( - // ReturnValueNone is a ReturnValue enum value - ReturnValueNone = "NONE" - - // ReturnValueAllOld is a ReturnValue enum value - ReturnValueAllOld = "ALL_OLD" - - // ReturnValueUpdatedOld is a ReturnValue enum value - ReturnValueUpdatedOld = "UPDATED_OLD" - - // ReturnValueAllNew is a ReturnValue enum value - ReturnValueAllNew = "ALL_NEW" - - // ReturnValueUpdatedNew is a ReturnValue enum value - ReturnValueUpdatedNew = "UPDATED_NEW" -) - -// ReturnValue_Values returns all elements of the ReturnValue enum -func ReturnValue_Values() []string { - return []string{ - ReturnValueNone, - ReturnValueAllOld, - ReturnValueUpdatedOld, - ReturnValueAllNew, - ReturnValueUpdatedNew, - } -} - -const ( - // ReturnValuesOnConditionCheckFailureAllOld is a ReturnValuesOnConditionCheckFailure enum value - ReturnValuesOnConditionCheckFailureAllOld = "ALL_OLD" - - // ReturnValuesOnConditionCheckFailureNone is a ReturnValuesOnConditionCheckFailure enum value 
- ReturnValuesOnConditionCheckFailureNone = "NONE" -) - -// ReturnValuesOnConditionCheckFailure_Values returns all elements of the ReturnValuesOnConditionCheckFailure enum -func ReturnValuesOnConditionCheckFailure_Values() []string { - return []string{ - ReturnValuesOnConditionCheckFailureAllOld, - ReturnValuesOnConditionCheckFailureNone, - } -} - -const ( - // S3SseAlgorithmAes256 is a S3SseAlgorithm enum value - S3SseAlgorithmAes256 = "AES256" - - // S3SseAlgorithmKms is a S3SseAlgorithm enum value - S3SseAlgorithmKms = "KMS" -) - -// S3SseAlgorithm_Values returns all elements of the S3SseAlgorithm enum -func S3SseAlgorithm_Values() []string { - return []string{ - S3SseAlgorithmAes256, - S3SseAlgorithmKms, - } -} - -const ( - // SSEStatusEnabling is a SSEStatus enum value - SSEStatusEnabling = "ENABLING" - - // SSEStatusEnabled is a SSEStatus enum value - SSEStatusEnabled = "ENABLED" - - // SSEStatusDisabling is a SSEStatus enum value - SSEStatusDisabling = "DISABLING" - - // SSEStatusDisabled is a SSEStatus enum value - SSEStatusDisabled = "DISABLED" - - // SSEStatusUpdating is a SSEStatus enum value - SSEStatusUpdating = "UPDATING" -) - -// SSEStatus_Values returns all elements of the SSEStatus enum -func SSEStatus_Values() []string { - return []string{ - SSEStatusEnabling, - SSEStatusEnabled, - SSEStatusDisabling, - SSEStatusDisabled, - SSEStatusUpdating, - } -} - -const ( - // SSETypeAes256 is a SSEType enum value - SSETypeAes256 = "AES256" - - // SSETypeKms is a SSEType enum value - SSETypeKms = "KMS" -) - -// SSEType_Values returns all elements of the SSEType enum -func SSEType_Values() []string { - return []string{ - SSETypeAes256, - SSETypeKms, - } -} - -const ( - // ScalarAttributeTypeS is a ScalarAttributeType enum value - ScalarAttributeTypeS = "S" - - // ScalarAttributeTypeN is a ScalarAttributeType enum value - ScalarAttributeTypeN = "N" - - // ScalarAttributeTypeB is a ScalarAttributeType enum value - ScalarAttributeTypeB = "B" -) - -// ScalarAttributeType_Values returns all elements of the ScalarAttributeType enum -func ScalarAttributeType_Values() []string { - return []string{ - ScalarAttributeTypeS, - ScalarAttributeTypeN, - ScalarAttributeTypeB, - } -} - -const ( - // SelectAllAttributes is a Select enum value - SelectAllAttributes = "ALL_ATTRIBUTES" - - // SelectAllProjectedAttributes is a Select enum value - SelectAllProjectedAttributes = "ALL_PROJECTED_ATTRIBUTES" - - // SelectSpecificAttributes is a Select enum value - SelectSpecificAttributes = "SPECIFIC_ATTRIBUTES" - - // SelectCount is a Select enum value - SelectCount = "COUNT" -) - -// Select_Values returns all elements of the Select enum -func Select_Values() []string { - return []string{ - SelectAllAttributes, - SelectAllProjectedAttributes, - SelectSpecificAttributes, - SelectCount, - } -} - -const ( - // StreamViewTypeNewImage is a StreamViewType enum value - StreamViewTypeNewImage = "NEW_IMAGE" - - // StreamViewTypeOldImage is a StreamViewType enum value - StreamViewTypeOldImage = "OLD_IMAGE" - - // StreamViewTypeNewAndOldImages is a StreamViewType enum value - StreamViewTypeNewAndOldImages = "NEW_AND_OLD_IMAGES" - - // StreamViewTypeKeysOnly is a StreamViewType enum value - StreamViewTypeKeysOnly = "KEYS_ONLY" -) - -// StreamViewType_Values returns all elements of the StreamViewType enum -func StreamViewType_Values() []string { - return []string{ - StreamViewTypeNewImage, - StreamViewTypeOldImage, - StreamViewTypeNewAndOldImages, - StreamViewTypeKeysOnly, - } -} - -const ( - // TableClassStandard is a 
TableClass enum value - TableClassStandard = "STANDARD" - - // TableClassStandardInfrequentAccess is a TableClass enum value - TableClassStandardInfrequentAccess = "STANDARD_INFREQUENT_ACCESS" -) - -// TableClass_Values returns all elements of the TableClass enum -func TableClass_Values() []string { - return []string{ - TableClassStandard, - TableClassStandardInfrequentAccess, - } -} - -const ( - // TableStatusCreating is a TableStatus enum value - TableStatusCreating = "CREATING" - - // TableStatusUpdating is a TableStatus enum value - TableStatusUpdating = "UPDATING" - - // TableStatusDeleting is a TableStatus enum value - TableStatusDeleting = "DELETING" - - // TableStatusActive is a TableStatus enum value - TableStatusActive = "ACTIVE" - - // TableStatusInaccessibleEncryptionCredentials is a TableStatus enum value - TableStatusInaccessibleEncryptionCredentials = "INACCESSIBLE_ENCRYPTION_CREDENTIALS" - - // TableStatusArchiving is a TableStatus enum value - TableStatusArchiving = "ARCHIVING" - - // TableStatusArchived is a TableStatus enum value - TableStatusArchived = "ARCHIVED" -) - -// TableStatus_Values returns all elements of the TableStatus enum -func TableStatus_Values() []string { - return []string{ - TableStatusCreating, - TableStatusUpdating, - TableStatusDeleting, - TableStatusActive, - TableStatusInaccessibleEncryptionCredentials, - TableStatusArchiving, - TableStatusArchived, - } -} - -const ( - // TimeToLiveStatusEnabling is a TimeToLiveStatus enum value - TimeToLiveStatusEnabling = "ENABLING" - - // TimeToLiveStatusDisabling is a TimeToLiveStatus enum value - TimeToLiveStatusDisabling = "DISABLING" - - // TimeToLiveStatusEnabled is a TimeToLiveStatus enum value - TimeToLiveStatusEnabled = "ENABLED" - - // TimeToLiveStatusDisabled is a TimeToLiveStatus enum value - TimeToLiveStatusDisabled = "DISABLED" -) - -// TimeToLiveStatus_Values returns all elements of the TimeToLiveStatus enum -func TimeToLiveStatus_Values() []string { - return []string{ - TimeToLiveStatusEnabling, - TimeToLiveStatusDisabling, - TimeToLiveStatusEnabled, - TimeToLiveStatusDisabled, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go deleted file mode 100644 index c019e63dfc..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go +++ /dev/null @@ -1,98 +0,0 @@ -package dynamodb - -import ( - "bytes" - "hash/crc32" - "io" - "io/ioutil" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/request" -) - -func init() { - initClient = func(c *client.Client) { - if c.Config.Retryer == nil { - // Only override the retryer with a custom one if the config - // does not already contain a retryer - setCustomRetryer(c) - } - - c.Handlers.Build.PushBack(disableCompression) - c.Handlers.Unmarshal.PushFront(validateCRC32) - } -} - -func setCustomRetryer(c *client.Client) { - maxRetries := aws.IntValue(c.Config.MaxRetries) - if c.Config.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { - maxRetries = 10 - } - - c.Retryer = client.DefaultRetryer{ - NumMaxRetries: maxRetries, - MinRetryDelay: 50 * time.Millisecond, - } -} - -func drainBody(b io.ReadCloser, length int64) (out *bytes.Buffer, err error) { - if length < 0 { - length = 0 - } - buf := bytes.NewBuffer(make([]byte, 0, length)) - - if _, err = buf.ReadFrom(b); err != nil { - return nil, err - } 
- if err = b.Close(); err != nil { - return nil, err - } - return buf, nil -} - -func disableCompression(r *request.Request) { - r.HTTPRequest.Header.Set("Accept-Encoding", "identity") -} - -func validateCRC32(r *request.Request) { - if r.Error != nil { - return // already have an error, no need to verify CRC - } - - // Checksum validation is off, skip - if aws.BoolValue(r.Config.DisableComputeChecksums) { - return - } - - // Try to get CRC from response - header := r.HTTPResponse.Header.Get("X-Amz-Crc32") - if header == "" { - return // No header, skip - } - - expected, err := strconv.ParseUint(header, 10, 32) - if err != nil { - return // Could not determine CRC value, skip - } - - buf, err := drainBody(r.HTTPResponse.Body, r.HTTPResponse.ContentLength) - if err != nil { // failed to read the response body, skip - return - } - - // Reset body for subsequent reads - r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewReader(buf.Bytes())) - - // Compute the CRC checksum - crc := crc32.ChecksumIEEE(buf.Bytes()) - - if crc != uint32(expected) { - // CRC does not match, set a retryable error - r.Retryable = aws.Bool(true) - r.Error = awserr.New("CRC32CheckFailed", "CRC32 integrity check failed", nil) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc.go deleted file mode 100644 index ab12b274f3..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc.go +++ /dev/null @@ -1,45 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package dynamodb provides the client and types for making API -// requests to Amazon DynamoDB. -// -// Amazon DynamoDB is a fully managed NoSQL database service that provides fast -// and predictable performance with seamless scalability. DynamoDB lets you -// offload the administrative burdens of operating and scaling a distributed -// database, so that you don't have to worry about hardware provisioning, setup -// and configuration, replication, software patching, or cluster scaling. -// -// With DynamoDB, you can create database tables that can store and retrieve -// any amount of data, and serve any level of request traffic. You can scale -// up or scale down your tables' throughput capacity without downtime or performance -// degradation, and use the Amazon Web Services Management Console to monitor -// resource utilization and performance metrics. -// -// DynamoDB automatically spreads the data and traffic for your tables over -// a sufficient number of servers to handle your throughput and storage requirements, -// while maintaining consistent and fast performance. All of your data is stored -// on solid state disks (SSDs) and automatically replicated across multiple -// Availability Zones in an Amazon Web Services Region, providing built-in high -// availability and data durability. -// -// See https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10 for more information on this service. -// -// See dynamodb package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/ -// -// # Using the Client -// -// To contact Amazon DynamoDB with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. 
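// The validateCRC32 handler above is the substantive logic in this deleted
// customizations file: DynamoDB returns an X-Amz-Crc32 header, and the handler
// replays the response body through an IEEE CRC32, flagging a mismatch as a
// retryable error. A self-contained sketch of the same check, assuming the
// body has already been read (checkCRC32 is our name, not the SDK's):
//
//	func checkCRC32(resp *http.Response, body []byte) error {
//		header := resp.Header.Get("X-Amz-Crc32")
//		if header == "" {
//			return nil // no checksum sent, nothing to verify
//		}
//		expected, err := strconv.ParseUint(header, 10, 32)
//		if err != nil {
//			return nil // unparsable header, skip as the SDK does
//		}
//		if crc32.ChecksumIEEE(body) != uint32(expected) {
//			return errors.New("CRC32 integrity check failed")
//		}
//		return nil
//	}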
-// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the Amazon DynamoDB client DynamoDB for more -// information on creating client for this service. -// https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/#New -package dynamodb diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go deleted file mode 100644 index 0cca7e4b9e..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -AttributeValue Marshaling and Unmarshaling Helpers - -Utility helpers to marshal and unmarshal AttributeValue to and -from Go types can be found in the dynamodbattribute sub package. This package -provides specialized functions for the common ways of working with -AttributeValues. Such as map[string]*AttributeValue, []*AttributeValue, and -directly with *AttributeValue. This is helpful for marshaling Go types for API -operations such as PutItem, and unmarshaling Query and Scan APIs' responses. - -See the dynamodbattribute package documentation for more information. -https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/dynamodbattribute/ - -# Expression Builders - -The expression package provides utility types and functions to build DynamoDB -expression for type safe construction of API ExpressionAttributeNames, and -ExpressionAttribute Values. - -The package represents the various DynamoDB Expressions as structs named -accordingly. For example, ConditionBuilder represents a DynamoDB Condition -Expression, an UpdateBuilder represents a DynamoDB Update Expression, and so on. - -See the expression package documentation for more information. -https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/expression/ -*/ -package dynamodb diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go deleted file mode 100644 index e634112488..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go +++ /dev/null @@ -1,319 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package dynamodbiface provides an interface to enable mocking the Amazon DynamoDB service client -// for testing your code. -// -// It is important to note that this interface will have breaking changes -// when the service model is updated and adds new API operations, paginators, -// and waiters. -package dynamodbiface - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/dynamodb" -) - -// DynamoDBAPI provides an interface to enable mocking the -// dynamodb.DynamoDB service client's API operation, -// paginators, and waiters. This make unit testing your code that calls out -// to the SDK's service client's calls easier. -// -// The best way to use this interface is so the SDK's service client's calls -// can be stubbed out for unit testing your code with the SDK without needing -// to inject custom request handlers into the SDK's request pipeline. -// -// // myFunc uses an SDK service client to make a request to -// // Amazon DynamoDB. 
-// func myFunc(svc dynamodbiface.DynamoDBAPI) bool { -// // Make svc.BatchExecuteStatement request -// } -// -// func main() { -// sess := session.New() -// svc := dynamodb.New(sess) -// -// myFunc(svc) -// } -// -// In your _test.go file: -// -// // Define a mock struct to be used in your unit tests of myFunc. -// type mockDynamoDBClient struct { -// dynamodbiface.DynamoDBAPI -// } -// func (m *mockDynamoDBClient) BatchExecuteStatement(input *dynamodb.BatchExecuteStatementInput) (*dynamodb.BatchExecuteStatementOutput, error) { -// // mock response/functionality -// } -// -// func TestMyFunc(t *testing.T) { -// // Setup Test -// mockSvc := &mockDynamoDBClient{} -// -// myfunc(mockSvc) -// -// // Verify myFunc's functionality -// } -// -// It is important to note that this interface will have breaking changes -// when the service model is updated and adds new API operations, paginators, -// and waiters. Its suggested to use the pattern above for testing, or using -// tooling to generate mocks to satisfy the interfaces. -type DynamoDBAPI interface { - BatchExecuteStatement(*dynamodb.BatchExecuteStatementInput) (*dynamodb.BatchExecuteStatementOutput, error) - BatchExecuteStatementWithContext(aws.Context, *dynamodb.BatchExecuteStatementInput, ...request.Option) (*dynamodb.BatchExecuteStatementOutput, error) - BatchExecuteStatementRequest(*dynamodb.BatchExecuteStatementInput) (*request.Request, *dynamodb.BatchExecuteStatementOutput) - - BatchGetItem(*dynamodb.BatchGetItemInput) (*dynamodb.BatchGetItemOutput, error) - BatchGetItemWithContext(aws.Context, *dynamodb.BatchGetItemInput, ...request.Option) (*dynamodb.BatchGetItemOutput, error) - BatchGetItemRequest(*dynamodb.BatchGetItemInput) (*request.Request, *dynamodb.BatchGetItemOutput) - - BatchGetItemPages(*dynamodb.BatchGetItemInput, func(*dynamodb.BatchGetItemOutput, bool) bool) error - BatchGetItemPagesWithContext(aws.Context, *dynamodb.BatchGetItemInput, func(*dynamodb.BatchGetItemOutput, bool) bool, ...request.Option) error - - BatchWriteItem(*dynamodb.BatchWriteItemInput) (*dynamodb.BatchWriteItemOutput, error) - BatchWriteItemWithContext(aws.Context, *dynamodb.BatchWriteItemInput, ...request.Option) (*dynamodb.BatchWriteItemOutput, error) - BatchWriteItemRequest(*dynamodb.BatchWriteItemInput) (*request.Request, *dynamodb.BatchWriteItemOutput) - - CreateBackup(*dynamodb.CreateBackupInput) (*dynamodb.CreateBackupOutput, error) - CreateBackupWithContext(aws.Context, *dynamodb.CreateBackupInput, ...request.Option) (*dynamodb.CreateBackupOutput, error) - CreateBackupRequest(*dynamodb.CreateBackupInput) (*request.Request, *dynamodb.CreateBackupOutput) - - CreateGlobalTable(*dynamodb.CreateGlobalTableInput) (*dynamodb.CreateGlobalTableOutput, error) - CreateGlobalTableWithContext(aws.Context, *dynamodb.CreateGlobalTableInput, ...request.Option) (*dynamodb.CreateGlobalTableOutput, error) - CreateGlobalTableRequest(*dynamodb.CreateGlobalTableInput) (*request.Request, *dynamodb.CreateGlobalTableOutput) - - CreateTable(*dynamodb.CreateTableInput) (*dynamodb.CreateTableOutput, error) - CreateTableWithContext(aws.Context, *dynamodb.CreateTableInput, ...request.Option) (*dynamodb.CreateTableOutput, error) - CreateTableRequest(*dynamodb.CreateTableInput) (*request.Request, *dynamodb.CreateTableOutput) - - DeleteBackup(*dynamodb.DeleteBackupInput) (*dynamodb.DeleteBackupOutput, error) - DeleteBackupWithContext(aws.Context, *dynamodb.DeleteBackupInput, ...request.Option) (*dynamodb.DeleteBackupOutput, error) - 
DeleteBackupRequest(*dynamodb.DeleteBackupInput) (*request.Request, *dynamodb.DeleteBackupOutput) - - DeleteItem(*dynamodb.DeleteItemInput) (*dynamodb.DeleteItemOutput, error) - DeleteItemWithContext(aws.Context, *dynamodb.DeleteItemInput, ...request.Option) (*dynamodb.DeleteItemOutput, error) - DeleteItemRequest(*dynamodb.DeleteItemInput) (*request.Request, *dynamodb.DeleteItemOutput) - - DeleteResourcePolicy(*dynamodb.DeleteResourcePolicyInput) (*dynamodb.DeleteResourcePolicyOutput, error) - DeleteResourcePolicyWithContext(aws.Context, *dynamodb.DeleteResourcePolicyInput, ...request.Option) (*dynamodb.DeleteResourcePolicyOutput, error) - DeleteResourcePolicyRequest(*dynamodb.DeleteResourcePolicyInput) (*request.Request, *dynamodb.DeleteResourcePolicyOutput) - - DeleteTable(*dynamodb.DeleteTableInput) (*dynamodb.DeleteTableOutput, error) - DeleteTableWithContext(aws.Context, *dynamodb.DeleteTableInput, ...request.Option) (*dynamodb.DeleteTableOutput, error) - DeleteTableRequest(*dynamodb.DeleteTableInput) (*request.Request, *dynamodb.DeleteTableOutput) - - DescribeBackup(*dynamodb.DescribeBackupInput) (*dynamodb.DescribeBackupOutput, error) - DescribeBackupWithContext(aws.Context, *dynamodb.DescribeBackupInput, ...request.Option) (*dynamodb.DescribeBackupOutput, error) - DescribeBackupRequest(*dynamodb.DescribeBackupInput) (*request.Request, *dynamodb.DescribeBackupOutput) - - DescribeContinuousBackups(*dynamodb.DescribeContinuousBackupsInput) (*dynamodb.DescribeContinuousBackupsOutput, error) - DescribeContinuousBackupsWithContext(aws.Context, *dynamodb.DescribeContinuousBackupsInput, ...request.Option) (*dynamodb.DescribeContinuousBackupsOutput, error) - DescribeContinuousBackupsRequest(*dynamodb.DescribeContinuousBackupsInput) (*request.Request, *dynamodb.DescribeContinuousBackupsOutput) - - DescribeContributorInsights(*dynamodb.DescribeContributorInsightsInput) (*dynamodb.DescribeContributorInsightsOutput, error) - DescribeContributorInsightsWithContext(aws.Context, *dynamodb.DescribeContributorInsightsInput, ...request.Option) (*dynamodb.DescribeContributorInsightsOutput, error) - DescribeContributorInsightsRequest(*dynamodb.DescribeContributorInsightsInput) (*request.Request, *dynamodb.DescribeContributorInsightsOutput) - - DescribeEndpoints(*dynamodb.DescribeEndpointsInput) (*dynamodb.DescribeEndpointsOutput, error) - DescribeEndpointsWithContext(aws.Context, *dynamodb.DescribeEndpointsInput, ...request.Option) (*dynamodb.DescribeEndpointsOutput, error) - DescribeEndpointsRequest(*dynamodb.DescribeEndpointsInput) (*request.Request, *dynamodb.DescribeEndpointsOutput) - - DescribeExport(*dynamodb.DescribeExportInput) (*dynamodb.DescribeExportOutput, error) - DescribeExportWithContext(aws.Context, *dynamodb.DescribeExportInput, ...request.Option) (*dynamodb.DescribeExportOutput, error) - DescribeExportRequest(*dynamodb.DescribeExportInput) (*request.Request, *dynamodb.DescribeExportOutput) - - DescribeGlobalTable(*dynamodb.DescribeGlobalTableInput) (*dynamodb.DescribeGlobalTableOutput, error) - DescribeGlobalTableWithContext(aws.Context, *dynamodb.DescribeGlobalTableInput, ...request.Option) (*dynamodb.DescribeGlobalTableOutput, error) - DescribeGlobalTableRequest(*dynamodb.DescribeGlobalTableInput) (*request.Request, *dynamodb.DescribeGlobalTableOutput) - - DescribeGlobalTableSettings(*dynamodb.DescribeGlobalTableSettingsInput) (*dynamodb.DescribeGlobalTableSettingsOutput, error) - DescribeGlobalTableSettingsWithContext(aws.Context, *dynamodb.DescribeGlobalTableSettingsInput, 
...request.Option) (*dynamodb.DescribeGlobalTableSettingsOutput, error) - DescribeGlobalTableSettingsRequest(*dynamodb.DescribeGlobalTableSettingsInput) (*request.Request, *dynamodb.DescribeGlobalTableSettingsOutput) - - DescribeImport(*dynamodb.DescribeImportInput) (*dynamodb.DescribeImportOutput, error) - DescribeImportWithContext(aws.Context, *dynamodb.DescribeImportInput, ...request.Option) (*dynamodb.DescribeImportOutput, error) - DescribeImportRequest(*dynamodb.DescribeImportInput) (*request.Request, *dynamodb.DescribeImportOutput) - - DescribeKinesisStreamingDestination(*dynamodb.DescribeKinesisStreamingDestinationInput) (*dynamodb.DescribeKinesisStreamingDestinationOutput, error) - DescribeKinesisStreamingDestinationWithContext(aws.Context, *dynamodb.DescribeKinesisStreamingDestinationInput, ...request.Option) (*dynamodb.DescribeKinesisStreamingDestinationOutput, error) - DescribeKinesisStreamingDestinationRequest(*dynamodb.DescribeKinesisStreamingDestinationInput) (*request.Request, *dynamodb.DescribeKinesisStreamingDestinationOutput) - - DescribeLimits(*dynamodb.DescribeLimitsInput) (*dynamodb.DescribeLimitsOutput, error) - DescribeLimitsWithContext(aws.Context, *dynamodb.DescribeLimitsInput, ...request.Option) (*dynamodb.DescribeLimitsOutput, error) - DescribeLimitsRequest(*dynamodb.DescribeLimitsInput) (*request.Request, *dynamodb.DescribeLimitsOutput) - - DescribeTable(*dynamodb.DescribeTableInput) (*dynamodb.DescribeTableOutput, error) - DescribeTableWithContext(aws.Context, *dynamodb.DescribeTableInput, ...request.Option) (*dynamodb.DescribeTableOutput, error) - DescribeTableRequest(*dynamodb.DescribeTableInput) (*request.Request, *dynamodb.DescribeTableOutput) - - DescribeTableReplicaAutoScaling(*dynamodb.DescribeTableReplicaAutoScalingInput) (*dynamodb.DescribeTableReplicaAutoScalingOutput, error) - DescribeTableReplicaAutoScalingWithContext(aws.Context, *dynamodb.DescribeTableReplicaAutoScalingInput, ...request.Option) (*dynamodb.DescribeTableReplicaAutoScalingOutput, error) - DescribeTableReplicaAutoScalingRequest(*dynamodb.DescribeTableReplicaAutoScalingInput) (*request.Request, *dynamodb.DescribeTableReplicaAutoScalingOutput) - - DescribeTimeToLive(*dynamodb.DescribeTimeToLiveInput) (*dynamodb.DescribeTimeToLiveOutput, error) - DescribeTimeToLiveWithContext(aws.Context, *dynamodb.DescribeTimeToLiveInput, ...request.Option) (*dynamodb.DescribeTimeToLiveOutput, error) - DescribeTimeToLiveRequest(*dynamodb.DescribeTimeToLiveInput) (*request.Request, *dynamodb.DescribeTimeToLiveOutput) - - DisableKinesisStreamingDestination(*dynamodb.DisableKinesisStreamingDestinationInput) (*dynamodb.DisableKinesisStreamingDestinationOutput, error) - DisableKinesisStreamingDestinationWithContext(aws.Context, *dynamodb.DisableKinesisStreamingDestinationInput, ...request.Option) (*dynamodb.DisableKinesisStreamingDestinationOutput, error) - DisableKinesisStreamingDestinationRequest(*dynamodb.DisableKinesisStreamingDestinationInput) (*request.Request, *dynamodb.DisableKinesisStreamingDestinationOutput) - - EnableKinesisStreamingDestination(*dynamodb.EnableKinesisStreamingDestinationInput) (*dynamodb.EnableKinesisStreamingDestinationOutput, error) - EnableKinesisStreamingDestinationWithContext(aws.Context, *dynamodb.EnableKinesisStreamingDestinationInput, ...request.Option) (*dynamodb.EnableKinesisStreamingDestinationOutput, error) - EnableKinesisStreamingDestinationRequest(*dynamodb.EnableKinesisStreamingDestinationInput) (*request.Request, *dynamodb.EnableKinesisStreamingDestinationOutput) - 
- ExecuteStatement(*dynamodb.ExecuteStatementInput) (*dynamodb.ExecuteStatementOutput, error) - ExecuteStatementWithContext(aws.Context, *dynamodb.ExecuteStatementInput, ...request.Option) (*dynamodb.ExecuteStatementOutput, error) - ExecuteStatementRequest(*dynamodb.ExecuteStatementInput) (*request.Request, *dynamodb.ExecuteStatementOutput) - - ExecuteTransaction(*dynamodb.ExecuteTransactionInput) (*dynamodb.ExecuteTransactionOutput, error) - ExecuteTransactionWithContext(aws.Context, *dynamodb.ExecuteTransactionInput, ...request.Option) (*dynamodb.ExecuteTransactionOutput, error) - ExecuteTransactionRequest(*dynamodb.ExecuteTransactionInput) (*request.Request, *dynamodb.ExecuteTransactionOutput) - - ExportTableToPointInTime(*dynamodb.ExportTableToPointInTimeInput) (*dynamodb.ExportTableToPointInTimeOutput, error) - ExportTableToPointInTimeWithContext(aws.Context, *dynamodb.ExportTableToPointInTimeInput, ...request.Option) (*dynamodb.ExportTableToPointInTimeOutput, error) - ExportTableToPointInTimeRequest(*dynamodb.ExportTableToPointInTimeInput) (*request.Request, *dynamodb.ExportTableToPointInTimeOutput) - - GetItem(*dynamodb.GetItemInput) (*dynamodb.GetItemOutput, error) - GetItemWithContext(aws.Context, *dynamodb.GetItemInput, ...request.Option) (*dynamodb.GetItemOutput, error) - GetItemRequest(*dynamodb.GetItemInput) (*request.Request, *dynamodb.GetItemOutput) - - GetResourcePolicy(*dynamodb.GetResourcePolicyInput) (*dynamodb.GetResourcePolicyOutput, error) - GetResourcePolicyWithContext(aws.Context, *dynamodb.GetResourcePolicyInput, ...request.Option) (*dynamodb.GetResourcePolicyOutput, error) - GetResourcePolicyRequest(*dynamodb.GetResourcePolicyInput) (*request.Request, *dynamodb.GetResourcePolicyOutput) - - ImportTable(*dynamodb.ImportTableInput) (*dynamodb.ImportTableOutput, error) - ImportTableWithContext(aws.Context, *dynamodb.ImportTableInput, ...request.Option) (*dynamodb.ImportTableOutput, error) - ImportTableRequest(*dynamodb.ImportTableInput) (*request.Request, *dynamodb.ImportTableOutput) - - ListBackups(*dynamodb.ListBackupsInput) (*dynamodb.ListBackupsOutput, error) - ListBackupsWithContext(aws.Context, *dynamodb.ListBackupsInput, ...request.Option) (*dynamodb.ListBackupsOutput, error) - ListBackupsRequest(*dynamodb.ListBackupsInput) (*request.Request, *dynamodb.ListBackupsOutput) - - ListContributorInsights(*dynamodb.ListContributorInsightsInput) (*dynamodb.ListContributorInsightsOutput, error) - ListContributorInsightsWithContext(aws.Context, *dynamodb.ListContributorInsightsInput, ...request.Option) (*dynamodb.ListContributorInsightsOutput, error) - ListContributorInsightsRequest(*dynamodb.ListContributorInsightsInput) (*request.Request, *dynamodb.ListContributorInsightsOutput) - - ListContributorInsightsPages(*dynamodb.ListContributorInsightsInput, func(*dynamodb.ListContributorInsightsOutput, bool) bool) error - ListContributorInsightsPagesWithContext(aws.Context, *dynamodb.ListContributorInsightsInput, func(*dynamodb.ListContributorInsightsOutput, bool) bool, ...request.Option) error - - ListExports(*dynamodb.ListExportsInput) (*dynamodb.ListExportsOutput, error) - ListExportsWithContext(aws.Context, *dynamodb.ListExportsInput, ...request.Option) (*dynamodb.ListExportsOutput, error) - ListExportsRequest(*dynamodb.ListExportsInput) (*request.Request, *dynamodb.ListExportsOutput) - - ListExportsPages(*dynamodb.ListExportsInput, func(*dynamodb.ListExportsOutput, bool) bool) error - ListExportsPagesWithContext(aws.Context, *dynamodb.ListExportsInput, 
func(*dynamodb.ListExportsOutput, bool) bool, ...request.Option) error - - ListGlobalTables(*dynamodb.ListGlobalTablesInput) (*dynamodb.ListGlobalTablesOutput, error) - ListGlobalTablesWithContext(aws.Context, *dynamodb.ListGlobalTablesInput, ...request.Option) (*dynamodb.ListGlobalTablesOutput, error) - ListGlobalTablesRequest(*dynamodb.ListGlobalTablesInput) (*request.Request, *dynamodb.ListGlobalTablesOutput) - - ListImports(*dynamodb.ListImportsInput) (*dynamodb.ListImportsOutput, error) - ListImportsWithContext(aws.Context, *dynamodb.ListImportsInput, ...request.Option) (*dynamodb.ListImportsOutput, error) - ListImportsRequest(*dynamodb.ListImportsInput) (*request.Request, *dynamodb.ListImportsOutput) - - ListImportsPages(*dynamodb.ListImportsInput, func(*dynamodb.ListImportsOutput, bool) bool) error - ListImportsPagesWithContext(aws.Context, *dynamodb.ListImportsInput, func(*dynamodb.ListImportsOutput, bool) bool, ...request.Option) error - - ListTables(*dynamodb.ListTablesInput) (*dynamodb.ListTablesOutput, error) - ListTablesWithContext(aws.Context, *dynamodb.ListTablesInput, ...request.Option) (*dynamodb.ListTablesOutput, error) - ListTablesRequest(*dynamodb.ListTablesInput) (*request.Request, *dynamodb.ListTablesOutput) - - ListTablesPages(*dynamodb.ListTablesInput, func(*dynamodb.ListTablesOutput, bool) bool) error - ListTablesPagesWithContext(aws.Context, *dynamodb.ListTablesInput, func(*dynamodb.ListTablesOutput, bool) bool, ...request.Option) error - - ListTagsOfResource(*dynamodb.ListTagsOfResourceInput) (*dynamodb.ListTagsOfResourceOutput, error) - ListTagsOfResourceWithContext(aws.Context, *dynamodb.ListTagsOfResourceInput, ...request.Option) (*dynamodb.ListTagsOfResourceOutput, error) - ListTagsOfResourceRequest(*dynamodb.ListTagsOfResourceInput) (*request.Request, *dynamodb.ListTagsOfResourceOutput) - - PutItem(*dynamodb.PutItemInput) (*dynamodb.PutItemOutput, error) - PutItemWithContext(aws.Context, *dynamodb.PutItemInput, ...request.Option) (*dynamodb.PutItemOutput, error) - PutItemRequest(*dynamodb.PutItemInput) (*request.Request, *dynamodb.PutItemOutput) - - PutResourcePolicy(*dynamodb.PutResourcePolicyInput) (*dynamodb.PutResourcePolicyOutput, error) - PutResourcePolicyWithContext(aws.Context, *dynamodb.PutResourcePolicyInput, ...request.Option) (*dynamodb.PutResourcePolicyOutput, error) - PutResourcePolicyRequest(*dynamodb.PutResourcePolicyInput) (*request.Request, *dynamodb.PutResourcePolicyOutput) - - Query(*dynamodb.QueryInput) (*dynamodb.QueryOutput, error) - QueryWithContext(aws.Context, *dynamodb.QueryInput, ...request.Option) (*dynamodb.QueryOutput, error) - QueryRequest(*dynamodb.QueryInput) (*request.Request, *dynamodb.QueryOutput) - - QueryPages(*dynamodb.QueryInput, func(*dynamodb.QueryOutput, bool) bool) error - QueryPagesWithContext(aws.Context, *dynamodb.QueryInput, func(*dynamodb.QueryOutput, bool) bool, ...request.Option) error - - RestoreTableFromBackup(*dynamodb.RestoreTableFromBackupInput) (*dynamodb.RestoreTableFromBackupOutput, error) - RestoreTableFromBackupWithContext(aws.Context, *dynamodb.RestoreTableFromBackupInput, ...request.Option) (*dynamodb.RestoreTableFromBackupOutput, error) - RestoreTableFromBackupRequest(*dynamodb.RestoreTableFromBackupInput) (*request.Request, *dynamodb.RestoreTableFromBackupOutput) - - RestoreTableToPointInTime(*dynamodb.RestoreTableToPointInTimeInput) (*dynamodb.RestoreTableToPointInTimeOutput, error) - RestoreTableToPointInTimeWithContext(aws.Context, *dynamodb.RestoreTableToPointInTimeInput, 
...request.Option) (*dynamodb.RestoreTableToPointInTimeOutput, error) - RestoreTableToPointInTimeRequest(*dynamodb.RestoreTableToPointInTimeInput) (*request.Request, *dynamodb.RestoreTableToPointInTimeOutput) - - Scan(*dynamodb.ScanInput) (*dynamodb.ScanOutput, error) - ScanWithContext(aws.Context, *dynamodb.ScanInput, ...request.Option) (*dynamodb.ScanOutput, error) - ScanRequest(*dynamodb.ScanInput) (*request.Request, *dynamodb.ScanOutput) - - ScanPages(*dynamodb.ScanInput, func(*dynamodb.ScanOutput, bool) bool) error - ScanPagesWithContext(aws.Context, *dynamodb.ScanInput, func(*dynamodb.ScanOutput, bool) bool, ...request.Option) error - - TagResource(*dynamodb.TagResourceInput) (*dynamodb.TagResourceOutput, error) - TagResourceWithContext(aws.Context, *dynamodb.TagResourceInput, ...request.Option) (*dynamodb.TagResourceOutput, error) - TagResourceRequest(*dynamodb.TagResourceInput) (*request.Request, *dynamodb.TagResourceOutput) - - TransactGetItems(*dynamodb.TransactGetItemsInput) (*dynamodb.TransactGetItemsOutput, error) - TransactGetItemsWithContext(aws.Context, *dynamodb.TransactGetItemsInput, ...request.Option) (*dynamodb.TransactGetItemsOutput, error) - TransactGetItemsRequest(*dynamodb.TransactGetItemsInput) (*request.Request, *dynamodb.TransactGetItemsOutput) - - TransactWriteItems(*dynamodb.TransactWriteItemsInput) (*dynamodb.TransactWriteItemsOutput, error) - TransactWriteItemsWithContext(aws.Context, *dynamodb.TransactWriteItemsInput, ...request.Option) (*dynamodb.TransactWriteItemsOutput, error) - TransactWriteItemsRequest(*dynamodb.TransactWriteItemsInput) (*request.Request, *dynamodb.TransactWriteItemsOutput) - - UntagResource(*dynamodb.UntagResourceInput) (*dynamodb.UntagResourceOutput, error) - UntagResourceWithContext(aws.Context, *dynamodb.UntagResourceInput, ...request.Option) (*dynamodb.UntagResourceOutput, error) - UntagResourceRequest(*dynamodb.UntagResourceInput) (*request.Request, *dynamodb.UntagResourceOutput) - - UpdateContinuousBackups(*dynamodb.UpdateContinuousBackupsInput) (*dynamodb.UpdateContinuousBackupsOutput, error) - UpdateContinuousBackupsWithContext(aws.Context, *dynamodb.UpdateContinuousBackupsInput, ...request.Option) (*dynamodb.UpdateContinuousBackupsOutput, error) - UpdateContinuousBackupsRequest(*dynamodb.UpdateContinuousBackupsInput) (*request.Request, *dynamodb.UpdateContinuousBackupsOutput) - - UpdateContributorInsights(*dynamodb.UpdateContributorInsightsInput) (*dynamodb.UpdateContributorInsightsOutput, error) - UpdateContributorInsightsWithContext(aws.Context, *dynamodb.UpdateContributorInsightsInput, ...request.Option) (*dynamodb.UpdateContributorInsightsOutput, error) - UpdateContributorInsightsRequest(*dynamodb.UpdateContributorInsightsInput) (*request.Request, *dynamodb.UpdateContributorInsightsOutput) - - UpdateGlobalTable(*dynamodb.UpdateGlobalTableInput) (*dynamodb.UpdateGlobalTableOutput, error) - UpdateGlobalTableWithContext(aws.Context, *dynamodb.UpdateGlobalTableInput, ...request.Option) (*dynamodb.UpdateGlobalTableOutput, error) - UpdateGlobalTableRequest(*dynamodb.UpdateGlobalTableInput) (*request.Request, *dynamodb.UpdateGlobalTableOutput) - - UpdateGlobalTableSettings(*dynamodb.UpdateGlobalTableSettingsInput) (*dynamodb.UpdateGlobalTableSettingsOutput, error) - UpdateGlobalTableSettingsWithContext(aws.Context, *dynamodb.UpdateGlobalTableSettingsInput, ...request.Option) (*dynamodb.UpdateGlobalTableSettingsOutput, error) - UpdateGlobalTableSettingsRequest(*dynamodb.UpdateGlobalTableSettingsInput) (*request.Request, 
*dynamodb.UpdateGlobalTableSettingsOutput) - - UpdateItem(*dynamodb.UpdateItemInput) (*dynamodb.UpdateItemOutput, error) - UpdateItemWithContext(aws.Context, *dynamodb.UpdateItemInput, ...request.Option) (*dynamodb.UpdateItemOutput, error) - UpdateItemRequest(*dynamodb.UpdateItemInput) (*request.Request, *dynamodb.UpdateItemOutput) - - UpdateKinesisStreamingDestination(*dynamodb.UpdateKinesisStreamingDestinationInput) (*dynamodb.UpdateKinesisStreamingDestinationOutput, error) - UpdateKinesisStreamingDestinationWithContext(aws.Context, *dynamodb.UpdateKinesisStreamingDestinationInput, ...request.Option) (*dynamodb.UpdateKinesisStreamingDestinationOutput, error) - UpdateKinesisStreamingDestinationRequest(*dynamodb.UpdateKinesisStreamingDestinationInput) (*request.Request, *dynamodb.UpdateKinesisStreamingDestinationOutput) - - UpdateTable(*dynamodb.UpdateTableInput) (*dynamodb.UpdateTableOutput, error) - UpdateTableWithContext(aws.Context, *dynamodb.UpdateTableInput, ...request.Option) (*dynamodb.UpdateTableOutput, error) - UpdateTableRequest(*dynamodb.UpdateTableInput) (*request.Request, *dynamodb.UpdateTableOutput) - - UpdateTableReplicaAutoScaling(*dynamodb.UpdateTableReplicaAutoScalingInput) (*dynamodb.UpdateTableReplicaAutoScalingOutput, error) - UpdateTableReplicaAutoScalingWithContext(aws.Context, *dynamodb.UpdateTableReplicaAutoScalingInput, ...request.Option) (*dynamodb.UpdateTableReplicaAutoScalingOutput, error) - UpdateTableReplicaAutoScalingRequest(*dynamodb.UpdateTableReplicaAutoScalingInput) (*request.Request, *dynamodb.UpdateTableReplicaAutoScalingOutput) - - UpdateTimeToLive(*dynamodb.UpdateTimeToLiveInput) (*dynamodb.UpdateTimeToLiveOutput, error) - UpdateTimeToLiveWithContext(aws.Context, *dynamodb.UpdateTimeToLiveInput, ...request.Option) (*dynamodb.UpdateTimeToLiveOutput, error) - UpdateTimeToLiveRequest(*dynamodb.UpdateTimeToLiveInput) (*request.Request, *dynamodb.UpdateTimeToLiveOutput) - - WaitUntilTableExists(*dynamodb.DescribeTableInput) error - WaitUntilTableExistsWithContext(aws.Context, *dynamodb.DescribeTableInput, ...request.WaiterOption) error - - WaitUntilTableNotExists(*dynamodb.DescribeTableInput) error - WaitUntilTableNotExistsWithContext(aws.Context, *dynamodb.DescribeTableInput, ...request.WaiterOption) error -} - -var _ DynamoDBAPI = (*dynamodb.DynamoDB)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go deleted file mode 100644 index 2ef2cab532..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go +++ /dev/null @@ -1,408 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package dynamodb - -import ( - "github.com/aws/aws-sdk-go/private/protocol" -) - -const ( - - // ErrCodeBackupInUseException for service response error code - // "BackupInUseException". - // - // There is another ongoing conflicting backup control plane operation on the - // table. The backup is either being created, deleted or restored to a table. - ErrCodeBackupInUseException = "BackupInUseException" - - // ErrCodeBackupNotFoundException for service response error code - // "BackupNotFoundException". - // - // Backup not found for the given BackupARN. - ErrCodeBackupNotFoundException = "BackupNotFoundException" - - // ErrCodeConditionalCheckFailedException for service response error code - // "ConditionalCheckFailedException". - // - // A condition specified in the operation could not be evaluated. 
- ErrCodeConditionalCheckFailedException = "ConditionalCheckFailedException" - - // ErrCodeContinuousBackupsUnavailableException for service response error code - // "ContinuousBackupsUnavailableException". - // - // Backups have not yet been enabled for this table. - ErrCodeContinuousBackupsUnavailableException = "ContinuousBackupsUnavailableException" - - // ErrCodeDuplicateItemException for service response error code - // "DuplicateItemException". - // - // There was an attempt to insert an item with the same primary key as an item - // that already exists in the DynamoDB table. - ErrCodeDuplicateItemException = "DuplicateItemException" - - // ErrCodeExportConflictException for service response error code - // "ExportConflictException". - // - // There was a conflict when writing to the specified S3 bucket. - ErrCodeExportConflictException = "ExportConflictException" - - // ErrCodeExportNotFoundException for service response error code - // "ExportNotFoundException". - // - // The specified export was not found. - ErrCodeExportNotFoundException = "ExportNotFoundException" - - // ErrCodeGlobalTableAlreadyExistsException for service response error code - // "GlobalTableAlreadyExistsException". - // - // The specified global table already exists. - ErrCodeGlobalTableAlreadyExistsException = "GlobalTableAlreadyExistsException" - - // ErrCodeGlobalTableNotFoundException for service response error code - // "GlobalTableNotFoundException". - // - // The specified global table does not exist. - ErrCodeGlobalTableNotFoundException = "GlobalTableNotFoundException" - - // ErrCodeIdempotentParameterMismatchException for service response error code - // "IdempotentParameterMismatchException". - // - // DynamoDB rejected the request because you retried a request with a different - // payload but with an idempotent token that was already used. - ErrCodeIdempotentParameterMismatchException = "IdempotentParameterMismatchException" - - // ErrCodeImportConflictException for service response error code - // "ImportConflictException". - // - // There was a conflict when importing from the specified S3 source. This can - // occur when the current import conflicts with a previous import request that - // had the same client token. - ErrCodeImportConflictException = "ImportConflictException" - - // ErrCodeImportNotFoundException for service response error code - // "ImportNotFoundException". - // - // The specified import was not found. - ErrCodeImportNotFoundException = "ImportNotFoundException" - - // ErrCodeIndexNotFoundException for service response error code - // "IndexNotFoundException". - // - // The operation tried to access a nonexistent index. - ErrCodeIndexNotFoundException = "IndexNotFoundException" - - // ErrCodeInternalServerError for service response error code - // "InternalServerError". - // - // An error occurred on the server side. - ErrCodeInternalServerError = "InternalServerError" - - // ErrCodeInvalidExportTimeException for service response error code - // "InvalidExportTimeException". - // - // The specified ExportTime is outside of the point in time recovery window. - ErrCodeInvalidExportTimeException = "InvalidExportTimeException" - - // ErrCodeInvalidRestoreTimeException for service response error code - // "InvalidRestoreTimeException". - // - // An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime - // and LatestRestorableDateTime. 
- ErrCodeInvalidRestoreTimeException = "InvalidRestoreTimeException" - - // ErrCodeItemCollectionSizeLimitExceededException for service response error code - // "ItemCollectionSizeLimitExceededException". - // - // An item collection is too large. This exception is only returned for tables - // that have one or more local secondary indexes. - ErrCodeItemCollectionSizeLimitExceededException = "ItemCollectionSizeLimitExceededException" - - // ErrCodeLimitExceededException for service response error code - // "LimitExceededException". - // - // There is no limit to the number of daily on-demand backups that can be taken. - // - // For most purposes, up to 500 simultaneous table operations are allowed per - // account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, - // RestoreTableFromBackup, and RestoreTableToPointInTime. - // - // When you are creating a table with one or more secondary indexes, you can - // have up to 250 such requests running at a time. However, if the table or - // index specifications are complex, then DynamoDB might temporarily reduce - // the number of concurrent operations. - // - // When importing into DynamoDB, up to 50 simultaneous import table operations - // are allowed per account. - // - // There is a soft account quota of 2,500 tables. - // - // GetRecords was called with a value of more than 1000 for the limit request - // parameter. - // - // More than 2 processes are reading from the same streams shard at the same - // time. Exceeding this limit may result in request throttling. - ErrCodeLimitExceededException = "LimitExceededException" - - // ErrCodePointInTimeRecoveryUnavailableException for service response error code - // "PointInTimeRecoveryUnavailableException". - // - // Point in time recovery has not yet been enabled for this source table. - ErrCodePointInTimeRecoveryUnavailableException = "PointInTimeRecoveryUnavailableException" - - // ErrCodePolicyNotFoundException for service response error code - // "PolicyNotFoundException". - // - // The operation tried to access a nonexistent resource-based policy. - // - // If you specified an ExpectedRevisionId, it's possible that a policy is present - // for the resource but its revision ID didn't match the expected value. - ErrCodePolicyNotFoundException = "PolicyNotFoundException" - - // ErrCodeProvisionedThroughputExceededException for service response error code - // "ProvisionedThroughputExceededException". - // - // Your request rate is too high. The Amazon Web Services SDKs for DynamoDB - // automatically retry requests that receive this exception. Your request is - // eventually successful, unless your retry queue is too large to finish. Reduce - // the frequency of requests and use exponential backoff. For more information, - // go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) - // in the Amazon DynamoDB Developer Guide. - ErrCodeProvisionedThroughputExceededException = "ProvisionedThroughputExceededException" - - // ErrCodeReplicaAlreadyExistsException for service response error code - // "ReplicaAlreadyExistsException". - // - // The specified replica is already part of the global table. - ErrCodeReplicaAlreadyExistsException = "ReplicaAlreadyExistsException" - - // ErrCodeReplicaNotFoundException for service response error code - // "ReplicaNotFoundException". - // - // The specified replica is no longer part of the global table. 
- ErrCodeReplicaNotFoundException = "ReplicaNotFoundException" - - // ErrCodeRequestLimitExceeded for service response error code - // "RequestLimitExceeded". - // - // Throughput exceeds the current throughput quota for your account. Please - // contact Amazon Web Services Support (https://aws.amazon.com/support) to request - // a quota increase. - ErrCodeRequestLimitExceeded = "RequestLimitExceeded" - - // ErrCodeResourceInUseException for service response error code - // "ResourceInUseException". - // - // The operation conflicts with the resource's availability. For example, you - // attempted to recreate an existing table, or tried to delete a table currently - // in the CREATING state. - ErrCodeResourceInUseException = "ResourceInUseException" - - // ErrCodeResourceNotFoundException for service response error code - // "ResourceNotFoundException". - // - // The operation tried to access a nonexistent table or index. The resource - // might not be specified correctly, or its status might not be ACTIVE. - ErrCodeResourceNotFoundException = "ResourceNotFoundException" - - // ErrCodeTableAlreadyExistsException for service response error code - // "TableAlreadyExistsException". - // - // A target table with the specified name already exists. - ErrCodeTableAlreadyExistsException = "TableAlreadyExistsException" - - // ErrCodeTableInUseException for service response error code - // "TableInUseException". - // - // A target table with the specified name is either being created or deleted. - ErrCodeTableInUseException = "TableInUseException" - - // ErrCodeTableNotFoundException for service response error code - // "TableNotFoundException". - // - // A source table with the name TableName does not currently exist within the - // subscriber's account or the subscriber is operating in the wrong Amazon Web - // Services Region. - ErrCodeTableNotFoundException = "TableNotFoundException" - - // ErrCodeTransactionCanceledException for service response error code - // "TransactionCanceledException". - // - // The entire transaction request was canceled. - // - // DynamoDB cancels a TransactWriteItems request under the following circumstances: - // - // * A condition in one of the condition expressions is not met. - // - // * A table in the TransactWriteItems request is in a different account - // or region. - // - // * More than one action in the TransactWriteItems operation targets the - // same item. - // - // * There is insufficient provisioned capacity for the transaction to be - // completed. - // - // * An item size becomes too large (larger than 400 KB), or a local secondary - // index (LSI) becomes too large, or a similar validation error occurs because - // of changes made by the transaction. - // - // * There is a user error, such as an invalid data format. - // - // * There is an ongoing TransactWriteItems operation that conflicts with - // a concurrent TransactWriteItems request. In this case the TransactWriteItems - // operation fails with a TransactionCanceledException. - // - // DynamoDB cancels a TransactGetItems request under the following circumstances: - // - // * There is an ongoing TransactGetItems operation that conflicts with a - // concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request. - // In this case the TransactGetItems operation fails with a TransactionCanceledException. - // - // * A table in the TransactGetItems request is in a different account or - // region. 
- // - // * There is insufficient provisioned capacity for the transaction to be - // completed. - // - // * There is a user error, such as an invalid data format. - // - // If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons - // property. This property is not set for other languages. Transaction cancellation - // reasons are ordered in the order of requested items, if an item has no error - // it will have None code and Null message. - // - // Cancellation reason codes and possible error messages: - // - // * No Errors: Code: None Message: null - // - // * Conditional Check Failed: Code: ConditionalCheckFailed Message: The - // conditional request failed. - // - // * Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded - // Message: Collection size exceeded. - // - // * Transaction Conflict: Code: TransactionConflict Message: Transaction - // is ongoing for the item. - // - // * Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded - // Messages: The level of configured provisioned throughput for the table - // was exceeded. Consider increasing your provisioning level with the UpdateTable - // API. This Message is received when provisioned throughput is exceeded - // is on a provisioned DynamoDB table. The level of configured provisioned - // throughput for one or more global secondary indexes of the table was exceeded. - // Consider increasing your provisioning level for the under-provisioned - // global secondary indexes with the UpdateTable API. This message is returned - // when provisioned throughput is exceeded is on a provisioned GSI. - // - // * Throttling Error: Code: ThrottlingError Messages: Throughput exceeds - // the current capacity of your table or index. DynamoDB is automatically - // scaling your table or index so please try again shortly. If exceptions - // persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html. - // This message is returned when writes get throttled on an On-Demand table - // as DynamoDB is automatically scaling the table. Throughput exceeds the - // current capacity for one or more global secondary indexes. DynamoDB is - // automatically scaling your index so please try again shortly. This message - // is returned when writes get throttled on an On-Demand GSI as DynamoDB - // is automatically scaling the GSI. - // - // * Validation Error: Code: ValidationError Messages: One or more parameter - // values were invalid. The update expression attempted to update the secondary - // index key beyond allowed size limits. The update expression attempted - // to update the secondary index key to unsupported type. An operand in the - // update expression has an incorrect data type. Item size to update has - // exceeded the maximum allowed size. Number overflow. Attempting to store - // a number with magnitude larger than supported range. Type mismatch for - // attribute to update. Nesting Levels have exceeded supported limits. The - // document path provided in the update expression is invalid for update. - // The provided expression refers to an attribute that does not exist in - // the item. - ErrCodeTransactionCanceledException = "TransactionCanceledException" - - // ErrCodeTransactionConflictException for service response error code - // "TransactionConflictException". - // - // Operation was rejected because there is an ongoing transaction for the item. 
- ErrCodeTransactionConflictException = "TransactionConflictException" - - // ErrCodeTransactionInProgressException for service response error code - // "TransactionInProgressException". - // - // The transaction with the given request token is already in progress. - // - // Recommended Settings - // - // This is a general recommendation for handling the TransactionInProgressException. - // These settings help ensure that the client retries will trigger completion - // of the ongoing TransactWriteItems request. - // - // * Set clientExecutionTimeout to a value that allows at least one retry - // to be processed after 5 seconds have elapsed since the first attempt for - // the TransactWriteItems operation. - // - // * Set socketTimeout to a value a little lower than the requestTimeout - // setting. - // - // * requestTimeout should be set based on the time taken for the individual - // retries of a single HTTP request for your use case, but setting it to - // 1 second or higher should work well to reduce chances of retries and TransactionInProgressException - // errors. - // - // * Use exponential backoff when retrying and tune backoff if needed. - // - // Assuming default retry policy (https://github.com/aws/aws-sdk-java/blob/fd409dee8ae23fb8953e0bb4dbde65536a7e0514/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L97), - // example timeout settings based on the guidelines above are as follows: - // - // Example timeline: - // - // * 0-1000 first attempt - // - // * 1000-1500 first sleep/delay (default retry policy uses 500 ms as base - // delay for 4xx errors) - // - // * 1500-2500 second attempt - // - // * 2500-3500 second sleep/delay (500 * 2, exponential backoff) - // - // * 3500-4500 third attempt - // - // * 4500-6500 third sleep/delay (500 * 2^2) - // - // * 6500-7500 fourth attempt (this can trigger inline recovery since 5 seconds - // have elapsed since the first attempt reached TC) - ErrCodeTransactionInProgressException = "TransactionInProgressException" -) - -var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ - "BackupInUseException": newErrorBackupInUseException, - "BackupNotFoundException": newErrorBackupNotFoundException, - "ConditionalCheckFailedException": newErrorConditionalCheckFailedException, - "ContinuousBackupsUnavailableException": newErrorContinuousBackupsUnavailableException, - "DuplicateItemException": newErrorDuplicateItemException, - "ExportConflictException": newErrorExportConflictException, - "ExportNotFoundException": newErrorExportNotFoundException, - "GlobalTableAlreadyExistsException": newErrorGlobalTableAlreadyExistsException, - "GlobalTableNotFoundException": newErrorGlobalTableNotFoundException, - "IdempotentParameterMismatchException": newErrorIdempotentParameterMismatchException, - "ImportConflictException": newErrorImportConflictException, - "ImportNotFoundException": newErrorImportNotFoundException, - "IndexNotFoundException": newErrorIndexNotFoundException, - "InternalServerError": newErrorInternalServerError, - "InvalidExportTimeException": newErrorInvalidExportTimeException, - "InvalidRestoreTimeException": newErrorInvalidRestoreTimeException, - "ItemCollectionSizeLimitExceededException": newErrorItemCollectionSizeLimitExceededException, - "LimitExceededException": newErrorLimitExceededException, - "PointInTimeRecoveryUnavailableException": newErrorPointInTimeRecoveryUnavailableException, - "PolicyNotFoundException": newErrorPolicyNotFoundException, - 
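// The Recommended Settings documented above for TransactionInProgressException
// reduce to a concrete client-side pattern: check the error code, then retry
// with exponential backoff from a 500ms base so that a later attempt lands
// after the ~5 second recovery window. A hedged sketch (the retry budget and
// input are illustrative, not prescribed by the SDK):
//
//	var err error
//	for attempt := 0; attempt < 5; attempt++ {
//		if _, err = svc.TransactWriteItems(input); err == nil {
//			break
//		}
//		aerr, ok := err.(awserr.Error)
//		if !ok || aerr.Code() != dynamodb.ErrCodeTransactionInProgressException {
//			break // not retryable under this policy
//		}
//		time.Sleep(time.Duration(500<<attempt) * time.Millisecond) // 500ms, 1s, 2s, 4s
//	}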
"ProvisionedThroughputExceededException": newErrorProvisionedThroughputExceededException, - "ReplicaAlreadyExistsException": newErrorReplicaAlreadyExistsException, - "ReplicaNotFoundException": newErrorReplicaNotFoundException, - "RequestLimitExceeded": newErrorRequestLimitExceeded, - "ResourceInUseException": newErrorResourceInUseException, - "ResourceNotFoundException": newErrorResourceNotFoundException, - "TableAlreadyExistsException": newErrorTableAlreadyExistsException, - "TableInUseException": newErrorTableInUseException, - "TableNotFoundException": newErrorTableNotFoundException, - "TransactionCanceledException": newErrorTransactionCanceledException, - "TransactionConflictException": newErrorTransactionConflictException, - "TransactionInProgressException": newErrorTransactionInProgressException, -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go deleted file mode 100644 index ce0ed74469..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go +++ /dev/null @@ -1,112 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package dynamodb - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/crr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" -) - -// DynamoDB provides the API operation methods for making requests to -// Amazon DynamoDB. See this package's package overview docs -// for details on the service. -// -// DynamoDB methods are safe to use concurrently. It is not safe to -// modify mutate any of the struct's properties though. -type DynamoDB struct { - *client.Client - endpointCache *crr.EndpointCache -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "dynamodb" // Name of service. - EndpointsID = ServiceName // ID to lookup a service endpoint with. - ServiceID = "DynamoDB" // ServiceID is a unique identifier of a specific service. -) - -// New creates a new instance of the DynamoDB client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// -// mySession := session.Must(session.NewSession()) -// -// // Create a DynamoDB client from just a session. -// svc := dynamodb.New(mySession) -// -// // Create a DynamoDB client with additional configuration -// svc := dynamodb.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *DynamoDB { - c := p.ClientConfig(EndpointsID, cfgs...) - if c.SigningNameDerived || len(c.SigningName) == 0 { - c.SigningName = EndpointsID - // No Fallback - } - return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) -} - -// newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *DynamoDB { - svc := &DynamoDB{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - ServiceID: ServiceID, - SigningName: signingName, - SigningRegion: signingRegion, - PartitionID: partitionID, - Endpoint: endpoint, - APIVersion: "2012-08-10", - ResolvedRegion: resolvedRegion, - JSONVersion: "1.0", - TargetPrefix: "DynamoDB_20120810", - }, - handlers, - ), - } - svc.endpointCache = crr.NewEndpointCache(10) - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed( - protocol.NewUnmarshalErrorHandler(jsonrpc.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), - ) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a DynamoDB operation and runs any -// custom request initialization. -func (c *DynamoDB) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go deleted file mode 100644 index ae515f7de5..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go +++ /dev/null @@ -1,107 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package dynamodb - -import ( - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" -) - -// WaitUntilTableExists uses the DynamoDB API operation -// DescribeTable to wait for a condition to be met before returning. -// If the condition is not met within the max attempt window, an error will -// be returned. -func (c *DynamoDB) WaitUntilTableExists(input *DescribeTableInput) error { - return c.WaitUntilTableExistsWithContext(aws.BackgroundContext(), input) -} - -// WaitUntilTableExistsWithContext is an extended version of WaitUntilTableExists. -// With the support for passing in a context and options to configure the -// Waiter and the underlying request options. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
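// In use, the waiters below collapse to a single call: poll DescribeTable on a
// fixed 20-second delay for up to 25 attempts until the table reports ACTIVE
// (or, for the NotExists variant, until DescribeTable returns
// ResourceNotFoundException). A typical call site, with a hypothetical table
// name:
//
//	if err := svc.WaitUntilTableExists(&dynamodb.DescribeTableInput{
//		TableName: aws.String("example-table"),
//	}); err != nil {
//		log.Fatalf("table never became ACTIVE: %v", err)
//	}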
-func (c *DynamoDB) WaitUntilTableExistsWithContext(ctx aws.Context, input *DescribeTableInput, opts ...request.WaiterOption) error { - w := request.Waiter{ - Name: "WaitUntilTableExists", - MaxAttempts: 25, - Delay: request.ConstantWaiterDelay(20 * time.Second), - Acceptors: []request.WaiterAcceptor{ - { - State: request.SuccessWaiterState, - Matcher: request.PathWaiterMatch, Argument: "Table.TableStatus", - Expected: "ACTIVE", - }, - { - State: request.RetryWaiterState, - Matcher: request.ErrorWaiterMatch, - Expected: "ResourceNotFoundException", - }, - }, - Logger: c.Config.Logger, - NewRequest: func(opts []request.Option) (*request.Request, error) { - var inCpy *DescribeTableInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.DescribeTableRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - w.ApplyOptions(opts...) - - return w.WaitWithContext(ctx) -} - -// WaitUntilTableNotExists uses the DynamoDB API operation -// DescribeTable to wait for a condition to be met before returning. -// If the condition is not met within the max attempt window, an error will -// be returned. -func (c *DynamoDB) WaitUntilTableNotExists(input *DescribeTableInput) error { - return c.WaitUntilTableNotExistsWithContext(aws.BackgroundContext(), input) -} - -// WaitUntilTableNotExistsWithContext is an extended version of WaitUntilTableNotExists. -// With the support for passing in a context and options to configure the -// Waiter and the underlying request options. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) WaitUntilTableNotExistsWithContext(ctx aws.Context, input *DescribeTableInput, opts ...request.WaiterOption) error { - w := request.Waiter{ - Name: "WaitUntilTableNotExists", - MaxAttempts: 25, - Delay: request.ConstantWaiterDelay(20 * time.Second), - Acceptors: []request.WaiterAcceptor{ - { - State: request.SuccessWaiterState, - Matcher: request.ErrorWaiterMatch, - Expected: "ResourceNotFoundException", - }, - }, - Logger: c.Config.Logger, - NewRequest: func(opts []request.Option) (*request.Request, error) { - var inCpy *DescribeTableInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.DescribeTableRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - w.ApplyOptions(opts...) - - return w.WaitWithContext(ctx) -} diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md index 4df632dce8..8b6ab29500 100644 --- a/vendor/github.com/aws/smithy-go/CHANGELOG.md +++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md @@ -1,13 +1,41 @@ -# Release (2025-02-17) +# Release (2025-08-27) ## General Highlights * **Dependency Update**: Updated to the latest SDK module versions ## Module Highlights -* `github.com/aws/smithy-go`: v1.22.3 +* `github.com/aws/smithy-go`: v1.23.0 + * **Feature**: Sort map keys in JSON Document types. + +# Release (2025-07-24) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/smithy-go`: v1.22.5 + * **Feature**: Add HTTP interceptors. 
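The "HTTP interceptors" feature noted in the entry above is the API vendored later in this diff (`transport/http/interceptor.go` and `interceptor_middleware.go`). As a rough sketch of how a caller might use it: an interceptor is any type implementing one of the hook interfaces, registered through the `InterceptorRegistry`. How a generated client exposes that registry through its options is SDK-specific and not shown in this diff:

```go
package main

import (
	"context"
	"fmt"

	smithyhttp "github.com/aws/smithy-go/transport/http"
)

// logTransmit is a hypothetical interceptor that logs the outgoing request
// right before it is sent. It satisfies BeforeTransmitInterceptor by
// implementing the single BeforeTransmit hook.
type logTransmit struct{}

func (logTransmit) BeforeTransmit(ctx context.Context, in *smithyhttp.InterceptorContext) error {
	// in.Request is populated by the time this hook runs. Per the registry
	// docs added below, the field itself must not be reassigned, but the
	// request it points to may be mutated.
	fmt.Println("sending:", in.Request.Method, in.Request.URL.String())
	return nil
}

func main() {
	var reg smithyhttp.InterceptorRegistry
	reg.AddBeforeTransmit(logTransmit{})
	// Wiring reg into a client is left abstract here; note that per the
	// Copy method below, clients deep-copy the registry on each operation
	// call, so per-operation registrations do not leak across calls.
	_ = reg
}
```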
+ +# Release (2025-06-16) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/smithy-go`: v1.22.4 + * **Bug Fix**: Fix CBOR serd empty check for string and enum fields * **Bug Fix**: Fix HTTP metrics data race. * **Bug Fix**: Replace usages of deprecated ioutil package. +# Release (2025-02-17) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/smithy-go`: v1.22.3 + * **Dependency Update**: Bump minimum Go version to 1.22 per our language support policy. + # Release (2025-01-21) ## General Highlights diff --git a/vendor/github.com/aws/smithy-go/Makefile b/vendor/github.com/aws/smithy-go/Makefile index a3c2cf173d..34b17ab2fe 100644 --- a/vendor/github.com/aws/smithy-go/Makefile +++ b/vendor/github.com/aws/smithy-go/Makefile @@ -30,6 +30,24 @@ smithy-build: smithy-clean: cd codegen && ./gradlew clean +GRADLE_RETRIES := 3 +GRADLE_SLEEP := 2 + +# We're making a call to ./gradlew to trigger downloading Gradle and +# starting the daemon. Any call works, so using `./gradlew help` +ensure-gradle-up: + @cd codegen && for i in $(shell seq 1 $(GRADLE_RETRIES)); do \ + echo "Checking if Gradle daemon is up, attempt $$i..."; \ + if ./gradlew help; then \ + echo "Gradle daemon is up!"; \ + exit 0; \ + fi; \ + echo "Failed to start Gradle, retrying in $(GRADLE_SLEEP) seconds..."; \ + sleep $(GRADLE_SLEEP); \ + done; \ + echo "Failed to start Gradle after $(GRADLE_RETRIES) attempts."; \ + exit 1 + ################## # Linting/Verify # ################## @@ -51,12 +69,10 @@ cover: .PHONY: unit unit-race unit-test unit-race-test unit: verify - go vet ${BUILD_TAGS} --all ./... && \ go test ${BUILD_TAGS} ${RUN_NONE} ./... && \ go test -timeout=1m ${UNIT_TEST_TAGS} ./... unit-race: verify - go vet ${BUILD_TAGS} --all ./... && \ go test ${BUILD_TAGS} ${RUN_NONE} ./... && \ go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./... diff --git a/vendor/github.com/aws/smithy-go/README.md b/vendor/github.com/aws/smithy-go/README.md index 08df74589a..77a74ae0c2 100644 --- a/vendor/github.com/aws/smithy-go/README.md +++ b/vendor/github.com/aws/smithy-go/README.md @@ -4,19 +4,21 @@ [Smithy](https://smithy.io/) code generators for Go and the accompanying smithy-go runtime. -The smithy-go runtime requires a minimum version of Go 1.20. +The smithy-go runtime requires a minimum version of Go 1.22. **WARNING: All interfaces are subject to change.** -## Can I use the code generators? +## :no_entry_sign: DO NOT use the code generators in this repository + +**The code generators in this repository do not generate working clients at +this time.** In order to generate a usable smithy client you must provide a [protocol definition](https://github.com/aws/smithy-go/blob/main/codegen/smithy-go-codegen/src/main/java/software/amazon/smithy/go/codegen/integration/ProtocolGenerator.java), such as [AWS restJson1](https://smithy.io/2.0/aws/protocols/aws-restjson1-protocol.html), in order to generate transport mechanisms and serialization/deserialization code ("serde") accordingly. -The code generator does not currently support any protocols out of the box other than the new `smithy.protocols#rpcv2Cbor`, -therefore the useability of this project on its own is currently limited. +The code generator does not currently support any protocols out of the box. 
Support for all [AWS protocols](https://smithy.io/2.0/aws/protocols/index.html) exists in [aws-sdk-go-v2](https://github.com/aws/aws-sdk-go-v2). We are tracking the movement of those out of the SDK into smithy-go in @@ -31,6 +33,7 @@ This repository implements the following Smithy build plugins: |----|------------|-------------| | `go-codegen` | `software.amazon.smithy.go:smithy-go-codegen` | Implements Go client code generation for Smithy models. | | `go-server-codegen` | `software.amazon.smithy.go:smithy-go-codegen` | Implements Go server code generation for Smithy models. | +| `go-shape-codegen` | `software.amazon.smithy.go:smithy-go-codegen` | Implements Go shape code generation (types only) for Smithy models. | **NOTE: Build plugins are not currently published to mavenCentral. You must publish to mavenLocal to make the build plugins visible to the Smithy CLI. The artifact version is currently fixed at 0.1.0.** @@ -77,7 +80,7 @@ example created from `smithy init`: "service": "example.weather#Weather", "module": "github.com/example/weather", "generateGoMod": true, - "goDirective": "1.20" + "goDirective": "1.22" } } } @@ -87,6 +90,10 @@ example created from `smithy init`: This plugin is a work-in-progress and is currently undocumented. +## `go-shape-codegen` + +This plugin is a work-in-progress and is currently undocumented. + ## License This project is licensed under the Apache-2.0 License. diff --git a/vendor/github.com/aws/smithy-go/endpoints/endpoint.go b/vendor/github.com/aws/smithy-go/endpoints/endpoint.go index a935283974..f778272be3 100644 --- a/vendor/github.com/aws/smithy-go/endpoints/endpoint.go +++ b/vendor/github.com/aws/smithy-go/endpoints/endpoint.go @@ -9,7 +9,7 @@ import ( // Endpoint is the endpoint object returned by Endpoint resolution V2 type Endpoint struct { - // The complete URL minimally specfiying the scheme and host. + // The complete URL minimally specifying the scheme and host. // May optionally specify the port and base path component. URI url.URL diff --git a/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/doc.go b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/doc.go new file mode 100644 index 0000000000..e24e190dca --- /dev/null +++ b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/doc.go @@ -0,0 +1,4 @@ +// Package rulesfn provides endpoint rule functions for evaluating endpoint +// resolution rules. + +package rulesfn diff --git a/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/strings.go b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/strings.go new file mode 100644 index 0000000000..5cf4a7b02d --- /dev/null +++ b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/strings.go @@ -0,0 +1,25 @@ +package rulesfn + +// Substring returns the substring of the input provided. If the start or stop +// indexes are not valid for the input nil will be returned. If errors occur +// they will be added to the provided [ErrorCollector]. 
+func SubString(input string, start, stop int, reverse bool) *string { + if start < 0 || stop < 1 || start >= stop || len(input) < stop { + return nil + } + + for _, r := range input { + if r > 127 { + return nil + } + } + + if !reverse { + v := input[start:stop] + return &v + } + + rStart := len(input) - stop + rStop := len(input) - start + return SubString(input, rStart, rStop, false) +} diff --git a/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/uri.go b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/uri.go new file mode 100644 index 0000000000..0c11541276 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/uri.go @@ -0,0 +1,130 @@ +package rulesfn + +import ( + "fmt" + "net" + "net/url" + "strings" + + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// IsValidHostLabel returns if the input is a single valid [RFC 1123] host +// label. If allowSubDomains is true, will allow validation to include nested +// host labels. Returns false if the input is not a valid host label. If errors +// occur they will be added to the provided [ErrorCollector]. +// +// [RFC 1123]: https://www.ietf.org/rfc/rfc1123.txt +func IsValidHostLabel(input string, allowSubDomains bool) bool { + var labels []string + if allowSubDomains { + labels = strings.Split(input, ".") + } else { + labels = []string{input} + } + + for _, label := range labels { + if !smithyhttp.ValidHostLabel(label) { + return false + } + } + + return true +} + +// ParseURL returns a [URL] if the provided string could be parsed. Returns nil +// if the string could not be parsed. Any parsing error will be added to the +// [ErrorCollector]. +// +// If the input URL string contains an IP6 address with a zone index. The +// returned [builtin.URL.Authority] value will contain the percent escaped (%) +// zone index separator. +func ParseURL(input string) *URL { + u, err := url.Parse(input) + if err != nil { + return nil + } + + if u.RawQuery != "" { + return nil + } + + if u.Scheme != "http" && u.Scheme != "https" { + return nil + } + + normalizedPath := u.Path + if !strings.HasPrefix(normalizedPath, "/") { + normalizedPath = "/" + normalizedPath + } + if !strings.HasSuffix(normalizedPath, "/") { + normalizedPath = normalizedPath + "/" + } + + // IP6 hosts may have zone indexes that need to be escaped to be valid in a + // URI. The Go URL parser will unescape the `%25` into `%`. This needs to + // be reverted since the returned URL will be used in string builders. + authority := strings.ReplaceAll(u.Host, "%", "%25") + + return &URL{ + Scheme: u.Scheme, + Authority: authority, + Path: u.Path, + NormalizedPath: normalizedPath, + IsIp: net.ParseIP(hostnameWithoutZone(u)) != nil, + } +} + +// URL provides the structure describing the parts of a parsed URL returned by +// [ParseURL]. +type URL struct { + Scheme string // https://www.rfc-editor.org/rfc/rfc3986#section-3.1 + Authority string // https://www.rfc-editor.org/rfc/rfc3986#section-3.2 + Path string // https://www.rfc-editor.org/rfc/rfc3986#section-3.3 + NormalizedPath string // https://www.rfc-editor.org/rfc/rfc3986#section-6.2.3 + IsIp bool +} + +// URIEncode returns an percent-encoded [RFC3986 section 2.1] version of the +// input string. 
+// +// [RFC3986 section 2.1]: https://www.rfc-editor.org/rfc/rfc3986#section-2.1 +func URIEncode(input string) string { + var output strings.Builder + for _, c := range []byte(input) { + if validPercentEncodedChar(c) { + output.WriteByte(c) + continue + } + + fmt.Fprintf(&output, "%%%X", c) + } + + return output.String() +} + +func validPercentEncodedChar(c byte) bool { + return (c >= 'a' && c <= 'z') || + (c >= 'A' && c <= 'Z') || + (c >= '0' && c <= '9') || + c == '-' || c == '_' || c == '.' || c == '~' +} + +// hostname implements u.Hostname() but strips the ipv6 zone ID (if present) +// such that net.ParseIP can still recognize IPv6 addresses with zone IDs. +// +// FUTURE(10/2023): netip.ParseAddr handles this natively but we can't take +// that package as a dependency yet due to our min go version (1.15, netip +// starts in 1.18). When we align with go runtime deprecation policy in +// 10/2023, we can remove this. +func hostnameWithoutZone(u *url.URL) string { + full := u.Hostname() + + // this more or less mimics the internals of net/ (see unexported + // splitHostZone in that source) but throws the zone away because we don't + // need it + if i := strings.LastIndex(full, "%"); i > -1 { + return full[:i] + } + return full +} diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go index d12d95891d..945db0af30 100644 --- a/vendor/github.com/aws/smithy-go/go_module_metadata.go +++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go @@ -3,4 +3,4 @@ package smithy // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.22.3" +const goModuleVersion = "1.23.0" diff --git a/vendor/github.com/aws/smithy-go/modman.toml b/vendor/github.com/aws/smithy-go/modman.toml index 9d94b7cbd0..aac582fa2c 100644 --- a/vendor/github.com/aws/smithy-go/modman.toml +++ b/vendor/github.com/aws/smithy-go/modman.toml @@ -1,5 +1,4 @@ [dependencies] - "github.com/jmespath/go-jmespath" = "v0.4.0" [modules] diff --git a/vendor/github.com/aws/smithy-go/transport/http/interceptor.go b/vendor/github.com/aws/smithy-go/transport/http/interceptor.go new file mode 100644 index 0000000000..e21f2632a6 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/interceptor.go @@ -0,0 +1,321 @@ +package http + +import ( + "context" +) + +func icopy[T any](v []T) []T { + s := make([]T, len(v)) + copy(s, v) + return s +} + +// InterceptorContext is all the information available in different +// interceptors. +// +// Not all information is available in each interceptor, see each interface +// definition for more details. +type InterceptorContext struct { + Input any + Request *Request + + Output any + Response *Response +} + +// InterceptorRegistry holds a list of operation interceptors. +// +// Interceptors allow callers to insert custom behavior at well-defined points +// within a client's operation lifecycle. +// +// # Interceptor context +// +// All interceptors are invoked with a context object that contains input and +// output containers for the operation. The individual fields that are +// available will depend on what the interceptor is and, in certain +// interceptors, how far the operation was able to progress. See the +// documentation for each interface definition for more information about field +// availability. +// +// Implementations MUST NOT directly mutate the values of the fields in the +// interceptor context. They are free to mutate the existing values _pointed +// to_ by those fields, however. 
+// +// # Returning errors +// +// All interceptors can return errors. If an interceptor returns an error +// _before_ the client's retry loop, the operation will fail immediately. If +// one returns an error _within_ the retry loop, the error WILL be considered +// according to the client's retry policy. +// +// # Adding interceptors +// +// Idiomatically you will simply use one of the Add() receiver methods to +// register interceptors as desired. However, the list for each interface is +// exported on the registry struct and the caller is free to manipulate it +// directly, for example, to register a number of interceptors all at once, or +// to remove one that was previously registered. +// +// The base SDK client WILL NOT add any interceptors. SDK operations and +// customizations are implemented in terms of middleware. +// +// Modifications to the registry will not persist across operation calls when +// using per-operation functional options. This means you can register +// interceptors on a per-operation basis without affecting other operations. +type InterceptorRegistry struct { + BeforeExecution []BeforeExecutionInterceptor + BeforeSerialization []BeforeSerializationInterceptor + AfterSerialization []AfterSerializationInterceptor + BeforeRetryLoop []BeforeRetryLoopInterceptor + BeforeAttempt []BeforeAttemptInterceptor + BeforeSigning []BeforeSigningInterceptor + AfterSigning []AfterSigningInterceptor + BeforeTransmit []BeforeTransmitInterceptor + AfterTransmit []AfterTransmitInterceptor + BeforeDeserialization []BeforeDeserializationInterceptor + AfterDeserialization []AfterDeserializationInterceptor + AfterAttempt []AfterAttemptInterceptor + AfterExecution []AfterExecutionInterceptor +} + +// Copy returns a deep copy of the registry. This is used by SDK clients on +// each operation call in order to prevent per-op config mutation from +// persisting. +func (i *InterceptorRegistry) Copy() InterceptorRegistry { + return InterceptorRegistry{ + BeforeExecution: icopy(i.BeforeExecution), + BeforeSerialization: icopy(i.BeforeSerialization), + AfterSerialization: icopy(i.AfterSerialization), + BeforeRetryLoop: icopy(i.BeforeRetryLoop), + BeforeAttempt: icopy(i.BeforeAttempt), + BeforeSigning: icopy(i.BeforeSigning), + AfterSigning: icopy(i.AfterSigning), + BeforeTransmit: icopy(i.BeforeTransmit), + AfterTransmit: icopy(i.AfterTransmit), + BeforeDeserialization: icopy(i.BeforeDeserialization), + AfterDeserialization: icopy(i.AfterDeserialization), + AfterAttempt: icopy(i.AfterAttempt), + AfterExecution: icopy(i.AfterExecution), + } +} + +// AddBeforeExecution registers the provided BeforeExecutionInterceptor. +func (i *InterceptorRegistry) AddBeforeExecution(v BeforeExecutionInterceptor) { + i.BeforeExecution = append(i.BeforeExecution, v) +} + +// AddBeforeSerialization registers the provided BeforeSerializationInterceptor. +func (i *InterceptorRegistry) AddBeforeSerialization(v BeforeSerializationInterceptor) { + i.BeforeSerialization = append(i.BeforeSerialization, v) +} + +// AddAfterSerialization registers the provided AfterSerializationInterceptor. +func (i *InterceptorRegistry) AddAfterSerialization(v AfterSerializationInterceptor) { + i.AfterSerialization = append(i.AfterSerialization, v) +} + +// AddBeforeRetryLoop registers the provided BeforeRetryLoopInterceptor. +func (i *InterceptorRegistry) AddBeforeRetryLoop(v BeforeRetryLoopInterceptor) { + i.BeforeRetryLoop = append(i.BeforeRetryLoop, v) +} + +// AddBeforeAttempt registers the provided BeforeAttemptInterceptor. 
+func (i *InterceptorRegistry) AddBeforeAttempt(v BeforeAttemptInterceptor) { + i.BeforeAttempt = append(i.BeforeAttempt, v) +} + +// AddBeforeSigning registers the provided BeforeSigningInterceptor. +func (i *InterceptorRegistry) AddBeforeSigning(v BeforeSigningInterceptor) { + i.BeforeSigning = append(i.BeforeSigning, v) +} + +// AddAfterSigning registers the provided AfterSigningInterceptor. +func (i *InterceptorRegistry) AddAfterSigning(v AfterSigningInterceptor) { + i.AfterSigning = append(i.AfterSigning, v) +} + +// AddBeforeTransmit registers the provided BeforeTransmitInterceptor. +func (i *InterceptorRegistry) AddBeforeTransmit(v BeforeTransmitInterceptor) { + i.BeforeTransmit = append(i.BeforeTransmit, v) +} + +// AddAfterTransmit registers the provided AfterTransmitInterceptor. +func (i *InterceptorRegistry) AddAfterTransmit(v AfterTransmitInterceptor) { + i.AfterTransmit = append(i.AfterTransmit, v) +} + +// AddBeforeDeserialization registers the provided BeforeDeserializationInterceptor. +func (i *InterceptorRegistry) AddBeforeDeserialization(v BeforeDeserializationInterceptor) { + i.BeforeDeserialization = append(i.BeforeDeserialization, v) +} + +// AddAfterDeserialization registers the provided AfterDeserializationInterceptor. +func (i *InterceptorRegistry) AddAfterDeserialization(v AfterDeserializationInterceptor) { + i.AfterDeserialization = append(i.AfterDeserialization, v) +} + +// AddAfterAttempt registers the provided AfterAttemptInterceptor. +func (i *InterceptorRegistry) AddAfterAttempt(v AfterAttemptInterceptor) { + i.AfterAttempt = append(i.AfterAttempt, v) +} + +// AddAfterExecution registers the provided AfterExecutionInterceptor. +func (i *InterceptorRegistry) AddAfterExecution(v AfterExecutionInterceptor) { + i.AfterExecution = append(i.AfterExecution, v) +} + +// BeforeExecutionInterceptor runs before anything else in the operation +// lifecycle. +// +// Available InterceptorContext fields: +// - Input +type BeforeExecutionInterceptor interface { + BeforeExecution(ctx context.Context, in *InterceptorContext) error +} + +// BeforeSerializationInterceptor runs before the operation input is serialized +// into its transport request. +// +// Serialization occurs before the operation's retry loop. +// +// Available InterceptorContext fields: +// - Input +type BeforeSerializationInterceptor interface { + BeforeSerialization(ctx context.Context, in *InterceptorContext) error +} + +// AfterSerializationInterceptor runs after the operation input is serialized +// into its transport request. +// +// Available InterceptorContext fields: +// - Input +// - Request +type AfterSerializationInterceptor interface { + AfterSerialization(ctx context.Context, in *InterceptorContext) error +} + +// BeforeRetryLoopInterceptor runs right before the operation enters the retry loop. +// +// Available InterceptorContext fields: +// - Input +// - Request +type BeforeRetryLoopInterceptor interface { + BeforeRetryLoop(ctx context.Context, in *InterceptorContext) error +} + +// BeforeAttemptInterceptor runs right before every attempt in the retry loop. +// +// If this interceptor returns an error, AfterAttempt interceptors WILL NOT be +// invoked. +// +// Available InterceptorContext fields: +// - Input +// - Request +type BeforeAttemptInterceptor interface { + BeforeAttempt(ctx context.Context, in *InterceptorContext) error +} + +// BeforeSigningInterceptor runs right before the request is signed. +// +// Signing occurs within the operation's retry loop. 
+// +// Available InterceptorContext fields: +// - Input +// - Request +type BeforeSigningInterceptor interface { + BeforeSigning(ctx context.Context, in *InterceptorContext) error +} + +// AfterSigningInterceptor runs right after the request is signed. +// +// It is unsafe to modify the outgoing HTTP request at or past this hook, since +// doing so may invalidate the signature of the request. +// +// Available InterceptorContext fields: +// - Input +// - Request +type AfterSigningInterceptor interface { + AfterSigning(ctx context.Context, in *InterceptorContext) error +} + +// BeforeTransmitInterceptor runs right before the HTTP request is sent. +// +// HTTP transmit occurs within the operation's retry loop. +// +// Available InterceptorContext fields: +// - Input +// - Request +type BeforeTransmitInterceptor interface { + BeforeTransmit(ctx context.Context, in *InterceptorContext) error +} + +// AfterTransmitInterceptor runs right after the HTTP response is received. +// +// It will always be invoked when a response is received, regardless of its +// status code. Conversely, it WILL NOT be invoked if the HTTP round-trip was +// not successful, e.g. because of a DNS resolution error +// +// Available InterceptorContext fields: +// - Input +// - Request +// - Response +type AfterTransmitInterceptor interface { + AfterTransmit(ctx context.Context, in *InterceptorContext) error +} + +// BeforeDeserializationInterceptor runs right before the incoming HTTP response +// is deserialized. +// +// This interceptor IS NOT invoked if the HTTP round-trip was not successful. +// +// Deserialization occurs within the operation's retry loop. +// +// Available InterceptorContext fields: +// - Input +// - Request +// - Response +type BeforeDeserializationInterceptor interface { + BeforeDeserialization(ctx context.Context, in *InterceptorContext) error +} + +// AfterDeserializationInterceptor runs right after the incoming HTTP response +// is deserialized. This hook is invoked regardless of whether the deserialized +// result was an error. +// +// This interceptor IS NOT invoked if the HTTP round-trip was not successful. +// +// Available InterceptorContext fields: +// - Input +// - Output (IF the operation had a success-level response) +// - Request +// - Response +type AfterDeserializationInterceptor interface { + AfterDeserialization(ctx context.Context, in *InterceptorContext) error +} + +// AfterAttemptInterceptor runs right after the incoming HTTP response +// is deserialized. This hook is invoked regardless of whether the deserialized +// result was an error, or if another interceptor within the retry loop +// returned an error. +// +// Available InterceptorContext fields: +// - Input +// - Output (IF the operation had a success-level response) +// - Request (IF the operation did not return an error during serialization) +// - Response (IF the operation was able to transmit the HTTP request) +type AfterAttemptInterceptor interface { + AfterAttempt(ctx context.Context, in *InterceptorContext) error +} + +// AfterExecutionInterceptor runs after everything else. It runs regardless of +// how far the operation progressed in its lifecycle, and regardless of whether +// the operation succeeded or failed. 
+// +// Available InterceptorContext fields: +// - Input +// - Output (IF the operation had a success-level response) +// - Request (IF the operation did not return an error during serialization) +// - Response (IF the operation was able to transmit the HTTP request) +type AfterExecutionInterceptor interface { + AfterExecution(ctx context.Context, in *InterceptorContext) error +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/interceptor_middleware.go b/vendor/github.com/aws/smithy-go/transport/http/interceptor_middleware.go new file mode 100644 index 0000000000..2cc4b57f89 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/interceptor_middleware.go @@ -0,0 +1,325 @@ +package http + +import ( + "context" + "errors" + + "github.com/aws/smithy-go/middleware" +) + +type ictxKey struct{} + +func withIctx(ctx context.Context) context.Context { + return middleware.WithStackValue(ctx, ictxKey{}, &InterceptorContext{}) +} + +func getIctx(ctx context.Context) *InterceptorContext { + return middleware.GetStackValue(ctx, ictxKey{}).(*InterceptorContext) +} + +// InterceptExecution runs Before/AfterExecutionInterceptors. +type InterceptExecution struct { + BeforeExecution []BeforeExecutionInterceptor + AfterExecution []AfterExecutionInterceptor +} + +// ID identifies the middleware. +func (m *InterceptExecution) ID() string { + return "InterceptExecution" +} + +// HandleInitialize runs the interceptors. +func (m *InterceptExecution) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + out middleware.InitializeOutput, md middleware.Metadata, err error, +) { + ctx = withIctx(ctx) + getIctx(ctx).Input = in.Parameters + + for _, i := range m.BeforeExecution { + if err := i.BeforeExecution(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + out, md, err = next.HandleInitialize(ctx, in) + + for _, i := range m.AfterExecution { + if err := i.AfterExecution(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return out, md, err +} + +// InterceptBeforeSerialization runs BeforeSerializationInterceptors. +type InterceptBeforeSerialization struct { + Interceptors []BeforeSerializationInterceptor +} + +// ID identifies the middleware. +func (m *InterceptBeforeSerialization) ID() string { + return "InterceptBeforeSerialization" +} + +// HandleSerialize runs the interceptors. +func (m *InterceptBeforeSerialization) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.Interceptors { + if err := i.BeforeSerialization(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return next.HandleSerialize(ctx, in) +} + +// InterceptAfterSerialization runs AfterSerializationInterceptors. +type InterceptAfterSerialization struct { + Interceptors []AfterSerializationInterceptor +} + +// ID identifies the middleware. +func (m *InterceptAfterSerialization) ID() string { + return "InterceptAfterSerialization" +} + +// HandleSerialize runs the interceptors. 
+func (m *InterceptAfterSerialization) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, md middleware.Metadata, err error, +) { + getIctx(ctx).Request = in.Request.(*Request) + + for _, i := range m.Interceptors { + if err := i.AfterSerialization(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return next.HandleSerialize(ctx, in) +} + +// InterceptBeforeRetryLoop runs BeforeRetryLoopInterceptors. +type InterceptBeforeRetryLoop struct { + Interceptors []BeforeRetryLoopInterceptor +} + +// ID identifies the middleware. +func (m *InterceptBeforeRetryLoop) ID() string { + return "InterceptBeforeRetryLoop" +} + +// HandleFinalize runs the interceptors. +func (m *InterceptBeforeRetryLoop) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.Interceptors { + if err := i.BeforeRetryLoop(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return next.HandleFinalize(ctx, in) +} + +// InterceptBeforeSigning runs BeforeSigningInterceptors. +type InterceptBeforeSigning struct { + Interceptors []BeforeSigningInterceptor +} + +// ID identifies the middleware. +func (m *InterceptBeforeSigning) ID() string { + return "InterceptBeforeSigning" +} + +// HandleFinalize runs the interceptors. +func (m *InterceptBeforeSigning) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.Interceptors { + if err := i.BeforeSigning(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return next.HandleFinalize(ctx, in) +} + +// InterceptAfterSigning runs AfterSigningInterceptors. +type InterceptAfterSigning struct { + Interceptors []AfterSigningInterceptor +} + +// ID identifies the middleware. +func (m *InterceptAfterSigning) ID() string { + return "InterceptAfterSigning" +} + +// HandleFinalize runs the interceptors. +func (m *InterceptAfterSigning) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.Interceptors { + if err := i.AfterSigning(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return next.HandleFinalize(ctx, in) +} + +// InterceptTransmit runs BeforeTransmitInterceptors and AfterTransmitInterceptors. +type InterceptTransmit struct { + BeforeTransmit []BeforeTransmitInterceptor + AfterTransmit []AfterTransmitInterceptor +} + +// ID identifies the middleware. +func (m *InterceptTransmit) ID() string { + return "InterceptTransmit" +} + +// HandleDeserialize runs the interceptors. 
+func (m *InterceptTransmit) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.BeforeTransmit { + if err := i.BeforeTransmit(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + out, md, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, md, err + } + + // the root of the decorated middleware guarantees this will be here + // (client.go: ClientHandler.Handle) + getIctx(ctx).Response = out.RawResponse.(*Response) + + for _, i := range m.AfterTransmit { + if err := i.AfterTransmit(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return out, md, err +} + +// InterceptBeforeDeserialization runs BeforeDeserializationInterceptors. +type InterceptBeforeDeserialization struct { + Interceptors []BeforeDeserializationInterceptor +} + +// ID identifies the middleware. +func (m *InterceptBeforeDeserialization) ID() string { + return "InterceptBeforeDeserialization" +} + +// HandleDeserialize runs the interceptors. +func (m *InterceptBeforeDeserialization) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, md middleware.Metadata, err error, +) { + out, md, err = next.HandleDeserialize(ctx, in) + if err != nil { + var terr *RequestSendError + if errors.As(err, &terr) { + return out, md, err + } + } + + for _, i := range m.Interceptors { + if err := i.BeforeDeserialization(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return out, md, err +} + +// InterceptAfterDeserialization runs AfterDeserializationInterceptors. +type InterceptAfterDeserialization struct { + Interceptors []AfterDeserializationInterceptor +} + +// ID identifies the middleware. +func (m *InterceptAfterDeserialization) ID() string { + return "InterceptAfterDeserialization" +} + +// HandleDeserialize runs the interceptors. +func (m *InterceptAfterDeserialization) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, md middleware.Metadata, err error, +) { + out, md, err = next.HandleDeserialize(ctx, in) + if err != nil { + var terr *RequestSendError + if errors.As(err, &terr) { + return out, md, err + } + } + + getIctx(ctx).Output = out.Result + + for _, i := range m.Interceptors { + if err := i.AfterDeserialization(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return out, md, err +} + +// InterceptAttempt runs AfterAttemptInterceptors. +type InterceptAttempt struct { + BeforeAttempt []BeforeAttemptInterceptor + AfterAttempt []AfterAttemptInterceptor +} + +// ID identifies the middleware. +func (m *InterceptAttempt) ID() string { + return "InterceptAttempt" +} + +// HandleFinalize runs the interceptors. 
+func (m *InterceptAttempt) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.BeforeAttempt { + if err := i.BeforeAttempt(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + out, md, err = next.HandleFinalize(ctx, in) + + for _, i := range m.AfterAttempt { + if err := i.AfterAttempt(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return out, md, err +} diff --git a/vendor/github.com/aws/smithy-go/waiter/logger.go b/vendor/github.com/aws/smithy-go/waiter/logger.go new file mode 100644 index 0000000000..8d70a03ff2 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/waiter/logger.go @@ -0,0 +1,36 @@ +package waiter + +import ( + "context" + "fmt" + + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" +) + +// Logger is the Logger middleware used by the waiter to log an attempt +type Logger struct { + // Attempt is the current attempt to be logged + Attempt int64 +} + +// ID representing the Logger middleware +func (*Logger) ID() string { + return "WaiterLogger" +} + +// HandleInitialize performs handling of request in initialize stack step +func (m *Logger) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + logger := middleware.GetLogger(ctx) + + logger.Logf(logging.Debug, fmt.Sprintf("attempting waiter request, attempt count: %d", m.Attempt)) + + return next.HandleInitialize(ctx, in) +} + +// AddLogger is a helper util to add waiter logger after `SetLogger` middleware in +func (m Logger) AddLogger(stack *middleware.Stack) error { + return stack.Initialize.Insert(&m, "SetLogger", middleware.After) +} diff --git a/vendor/github.com/aws/smithy-go/waiter/waiter.go b/vendor/github.com/aws/smithy-go/waiter/waiter.go new file mode 100644 index 0000000000..03e46e2ee7 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/waiter/waiter.go @@ -0,0 +1,66 @@ +package waiter + +import ( + "fmt" + "math" + "time" + + "github.com/aws/smithy-go/rand" +) + +// ComputeDelay computes delay between waiter attempts. The function takes in a current attempt count, +// minimum delay, maximum delay, and remaining wait time for waiter as input. The inputs minDelay and maxDelay +// must always be greater than 0, along with minDelay lesser than or equal to maxDelay. +// +// Returns the computed delay and if next attempt count is possible within the given input time constraints. +// Note that the zeroth attempt results in no delay. +func ComputeDelay(attempt int64, minDelay, maxDelay, remainingTime time.Duration) (delay time.Duration, err error) { + // zeroth attempt, no delay + if attempt <= 0 { + return 0, nil + } + + // remainingTime is zero or less, no delay + if remainingTime <= 0 { + return 0, nil + } + + // validate min delay is greater than 0 + if minDelay == 0 { + return 0, fmt.Errorf("minDelay must be greater than zero when computing Delay") + } + + // validate max delay is greater than 0 + if maxDelay == 0 { + return 0, fmt.Errorf("maxDelay must be greater than zero when computing Delay") + } + + // Get attempt ceiling to prevent integer overflow. + attemptCeiling := (math.Log(float64(maxDelay/minDelay)) / math.Log(2)) + 1 + + if attempt > int64(attemptCeiling) { + delay = maxDelay + } else { + // Compute exponential delay based on attempt. 
+ ri := 1 << uint64(attempt-1) + // compute delay + delay = minDelay * time.Duration(ri) + } + + if delay != minDelay { + // randomize to get jitter between min delay and delay value + d, err := rand.CryptoRandInt63n(int64(delay - minDelay)) + if err != nil { + return 0, fmt.Errorf("error computing retry jitter, %w", err) + } + + delay = time.Duration(d) + minDelay + } + + // check if this is the last attempt possible and compute delay accordingly + if remainingTime-delay <= minDelay { + delay = remainingTime - minDelay + } + + return delay, nil +} diff --git a/vendor/github.com/go-chi/chi/v5/.gitignore b/vendor/github.com/go-chi/chi/v5/.gitignore new file mode 100644 index 0000000000..ba22c99a99 --- /dev/null +++ b/vendor/github.com/go-chi/chi/v5/.gitignore @@ -0,0 +1,3 @@ +.idea +*.sw? +.vscode diff --git a/vendor/github.com/go-chi/chi/v5/CHANGELOG.md b/vendor/github.com/go-chi/chi/v5/CHANGELOG.md new file mode 100644 index 0000000000..25b45b9743 --- /dev/null +++ b/vendor/github.com/go-chi/chi/v5/CHANGELOG.md @@ -0,0 +1,341 @@ +# Changelog + +## v5.0.12 (2024-02-16) + +- History of changes: see https://github.com/go-chi/chi/compare/v5.0.11...v5.0.12 + + +## v5.0.11 (2023-12-19) + +- History of changes: see https://github.com/go-chi/chi/compare/v5.0.10...v5.0.11 + + +## v5.0.10 (2023-07-13) + +- Fixed small edge case in tests of v5.0.9 for older Go versions +- History of changes: see https://github.com/go-chi/chi/compare/v5.0.9...v5.0.10 + + +## v5.0.9 (2023-07-13) + +- History of changes: see https://github.com/go-chi/chi/compare/v5.0.8...v5.0.9 + + +## v5.0.8 (2022-12-07) + +- History of changes: see https://github.com/go-chi/chi/compare/v5.0.7...v5.0.8 + + +## v5.0.7 (2021-11-18) + +- History of changes: see https://github.com/go-chi/chi/compare/v5.0.6...v5.0.7 + + +## v5.0.6 (2021-11-15) + +- History of changes: see https://github.com/go-chi/chi/compare/v5.0.5...v5.0.6 + + +## v5.0.5 (2021-10-27) + +- History of changes: see https://github.com/go-chi/chi/compare/v5.0.4...v5.0.5 + + +## v5.0.4 (2021-08-29) + +- History of changes: see https://github.com/go-chi/chi/compare/v5.0.3...v5.0.4 + + +## v5.0.3 (2021-04-29) + +- History of changes: see https://github.com/go-chi/chi/compare/v5.0.2...v5.0.3 + + +## v5.0.2 (2021-03-25) + +- History of changes: see https://github.com/go-chi/chi/compare/v5.0.1...v5.0.2 + + +## v5.0.1 (2021-03-10) + +- Small improvements +- History of changes: see https://github.com/go-chi/chi/compare/v5.0.0...v5.0.1 + + +## v5.0.0 (2021-02-27) + +- chi v5, `github.com/go-chi/chi/v5` introduces the adoption of Go's SIV to adhere to the current state-of-the-tools in Go. +- chi v1.5.x did not work out as planned, as the Go tooling is too powerful and chi's adoption is too wide. + The most responsible thing to do for everyone's benefit is to just release v5 with SIV, so I present to you all, + chi v5 at `github.com/go-chi/chi/v5`. 
I hope someday the developer experience and ergonomics I've been seeking + will still come to fruition in some form, see https://github.com/golang/go/issues/44550 +- History of changes: see https://github.com/go-chi/chi/compare/v1.5.4...v5.0.0 + + +## v1.5.4 (2021-02-27) + +- Undo prior retraction in v1.5.3 as we prepare for v5.0.0 release +- History of changes: see https://github.com/go-chi/chi/compare/v1.5.3...v1.5.4 + + +## v1.5.3 (2021-02-21) + +- Update go.mod to go 1.16 with new retract directive marking all versions without prior go.mod support +- History of changes: see https://github.com/go-chi/chi/compare/v1.5.2...v1.5.3 + + +## v1.5.2 (2021-02-10) + +- Reverting allocation optimization as a precaution as go test -race fails. +- Minor improvements, see history below +- History of changes: see https://github.com/go-chi/chi/compare/v1.5.1...v1.5.2 + + +## v1.5.1 (2020-12-06) + +- Performance improvement: removing 1 allocation by foregoing context.WithValue, thank you @bouk for + your contribution (https://github.com/go-chi/chi/pull/555). Note: new benchmarks posted in README. +- `middleware.CleanPath`: new middleware that clean's request path of double slashes +- deprecate & remove `chi.ServerBaseContext` in favour of stdlib `http.Server#BaseContext` +- plus other tiny improvements, see full commit history below +- History of changes: see https://github.com/go-chi/chi/compare/v4.1.2...v1.5.1 + + +## v1.5.0 (2020-11-12) - now with go.mod support + +`chi` dates back to 2016 with it's original implementation as one of the first routers to adopt the newly introduced +context.Context api to the stdlib -- set out to design a router that is faster, more modular and simpler than anything +else out there -- while not introducing any custom handler types or dependencies. Today, `chi` still has zero dependencies, +and in many ways is future proofed from changes, given it's minimal nature. Between versions, chi's iterations have been very +incremental, with the architecture and api being the same today as it was originally designed in 2016. For this reason it +makes chi a pretty easy project to maintain, as well thanks to the many amazing community contributions over the years +to who all help make chi better (total of 86 contributors to date -- thanks all!). + +Chi has been a labour of love, art and engineering, with the goals to offer beautiful ergonomics, flexibility, performance +and simplicity when building HTTP services with Go. I've strived to keep the router very minimal in surface area / code size, +and always improving the code wherever possible -- and as of today the `chi` package is just 1082 lines of code (not counting +middlewares, which are all optional). As well, I don't have the exact metrics, but from my analysis and email exchanges from +companies and developers, chi is used by thousands of projects around the world -- thank you all as there is no better form of +joy for me than to have art I had started be helpful and enjoyed by others. And of course I use chi in all of my own projects too :) + +For me, the aesthetics of chi's code and usage are very important. With the introduction of Go's module support +(which I'm a big fan of), chi's past versioning scheme choice to v2, v3 and v4 would mean I'd require the import path +of "github.com/go-chi/chi/v4", leading to the lengthy discussion at https://github.com/go-chi/chi/issues/462. 
+Haha, to some, you may be scratching your head why I've spent > 1 year stalling to adopt "/vXX" convention in the import +path -- which isn't horrible in general -- but for chi, I'm unable to accept it as I strive for perfection in it's API design, +aesthetics and simplicity. It just doesn't feel good to me given chi's simple nature -- I do not foresee a "v5" or "v6", +and upgrading between versions in the future will also be just incremental. + +I do understand versioning is a part of the API design as well, which is why the solution for a while has been to "do nothing", +as Go supports both old and new import paths with/out go.mod. However, now that Go module support has had time to iron out kinks and +is adopted everywhere, it's time for chi to get with the times. Luckily, I've discovered a path forward that will make me happy, +while also not breaking anyone's app who adopted a prior versioning from tags in v2/v3/v4. I've made an experimental release of +v1.5.0 with go.mod silently, and tested it with new and old projects, to ensure the developer experience is preserved, and it's +largely unnoticed. Fortunately, Go's toolchain will check the tags of a repo and consider the "latest" tag the one with go.mod. +However, you can still request a specific older tag such as v4.1.2, and everything will "just work". But new users can just +`go get github.com/go-chi/chi` or `go get github.com/go-chi/chi@latest` and they will get the latest version which contains +go.mod support, which is v1.5.0+. `chi` will not change very much over the years, just like it hasn't changed much from 4 years ago. +Therefore, we will stay on v1.x from here on, starting from v1.5.0. Any breaking changes will bump a "minor" release and +backwards-compatible improvements/fixes will bump a "tiny" release. + +For existing projects who want to upgrade to the latest go.mod version, run: `go get -u github.com/go-chi/chi@v1.5.0`, +which will get you on the go.mod version line (as Go's mod cache may still remember v4.x). Brand new systems can run +`go get -u github.com/go-chi/chi` or `go get -u github.com/go-chi/chi@latest` to install chi, which will install v1.5.0+ +built with go.mod support. + +My apologies to the developers who will disagree with the decisions above, but, hope you'll try it and see it's a very +minor request which is backwards compatible and won't break your existing installations. + +Cheers all, happy coding! + + +--- + + +## v4.1.2 (2020-06-02) + +- fix that handles MethodNotAllowed with path variables, thank you @caseyhadden for your contribution +- fix to replace nested wildcards correctly in RoutePattern, thank you @@unmultimedio for your contribution +- History of changes: see https://github.com/go-chi/chi/compare/v4.1.1...v4.1.2 + + +## v4.1.1 (2020-04-16) + +- fix for issue https://github.com/go-chi/chi/issues/411 which allows for overlapping regexp + route to the correct handler through a recursive tree search, thanks to @Jahaja for the PR/fix! +- new middleware.RouteHeaders as a simple router for request headers with wildcard support +- History of changes: see https://github.com/go-chi/chi/compare/v4.1.0...v4.1.1 + + +## v4.1.0 (2020-04-1) + +- middleware.LogEntry: Write method on interface now passes the response header + and an extra interface type useful for custom logger implementations. 
+- middleware.WrapResponseWriter: minor fix +- middleware.Recoverer: a bit prettier +- History of changes: see https://github.com/go-chi/chi/compare/v4.0.4...v4.1.0 + +## v4.0.4 (2020-03-24) + +- middleware.Recoverer: new pretty stack trace printing (https://github.com/go-chi/chi/pull/496) +- a few minor improvements and fixes +- History of changes: see https://github.com/go-chi/chi/compare/v4.0.3...v4.0.4 + + +## v4.0.3 (2020-01-09) + +- core: fix regexp routing to include default value when param is not matched +- middleware: rewrite of middleware.Compress +- middleware: suppress http.ErrAbortHandler in middleware.Recoverer +- History of changes: see https://github.com/go-chi/chi/compare/v4.0.2...v4.0.3 + + +## v4.0.2 (2019-02-26) + +- Minor fixes +- History of changes: see https://github.com/go-chi/chi/compare/v4.0.1...v4.0.2 + + +## v4.0.1 (2019-01-21) + +- Fixes issue with compress middleware: #382 #385 +- History of changes: see https://github.com/go-chi/chi/compare/v4.0.0...v4.0.1 + + +## v4.0.0 (2019-01-10) + +- chi v4 requires Go 1.10.3+ (or Go 1.9.7+) - we have deprecated support for Go 1.7 and 1.8 +- router: respond with 404 on router with no routes (#362) +- router: additional check to ensure wildcard is at the end of a url pattern (#333) +- middleware: deprecate use of http.CloseNotifier (#347) +- middleware: fix RedirectSlashes to include query params on redirect (#334) +- History of changes: see https://github.com/go-chi/chi/compare/v3.3.4...v4.0.0 + + +## v3.3.4 (2019-01-07) + +- Minor middleware improvements. No changes to core library/router. Moving v3 into its +- own branch as a version of chi for Go 1.7, 1.8, 1.9, 1.10, 1.11 +- History of changes: see https://github.com/go-chi/chi/compare/v3.3.3...v3.3.4 + + +## v3.3.3 (2018-08-27) + +- Minor release +- See https://github.com/go-chi/chi/compare/v3.3.2...v3.3.3 + + +## v3.3.2 (2017-12-22) + +- Support to route trailing slashes on mounted sub-routers (#281) +- middleware: new `ContentCharset` to check matching charsets. Thank you + @csucu for your community contribution! + + +## v3.3.1 (2017-11-20) + +- middleware: new `AllowContentType` handler for explicit whitelist of accepted request Content-Types +- middleware: new `SetHeader` handler for short-hand middleware to set a response header key/value +- Minor bug fixes + + +## v3.3.0 (2017-10-10) + +- New chi.RegisterMethod(method) to add support for custom HTTP methods, see _examples/custom-method for usage +- Deprecated LINK and UNLINK methods from the default list, please use `chi.RegisterMethod("LINK")` and `chi.RegisterMethod("UNLINK")` in an `init()` function + + +## v3.2.1 (2017-08-31) + +- Add new `Match(rctx *Context, method, path string) bool` method to `Routes` interface + and `Mux`. 
Match searches the mux's routing tree for a handler that matches the method/path +- Add new `RouteMethod` to `*Context` +- Add new `Routes` pointer to `*Context` +- Add new `middleware.GetHead` to route missing HEAD requests to GET handler +- Updated benchmarks (see README) + + +## v3.1.5 (2017-08-02) + +- Setup golint and go vet for the project +- As per golint, we've redefined `func ServerBaseContext(h http.Handler, baseCtx context.Context) http.Handler` + to `func ServerBaseContext(baseCtx context.Context, h http.Handler) http.Handler` + + +## v3.1.0 (2017-07-10) + +- Fix a few minor issues after v3 release +- Move `docgen` sub-pkg to https://github.com/go-chi/docgen +- Move `render` sub-pkg to https://github.com/go-chi/render +- Add new `URLFormat` handler to chi/middleware sub-pkg to make working with url mime + suffixes easier, ie. parsing `/articles/1.json` and `/articles/1.xml`. See comments in + https://github.com/go-chi/chi/blob/master/middleware/url_format.go for example usage. + + +## v3.0.0 (2017-06-21) + +- Major update to chi library with many exciting updates, but also some *breaking changes* +- URL parameter syntax changed from `/:id` to `/{id}` for even more flexible routing, such as + `/articles/{month}-{day}-{year}-{slug}`, `/articles/{id}`, and `/articles/{id}.{ext}` on the + same router +- Support for regexp for routing patterns, in the form of `/{paramKey:regExp}` for example: + `r.Get("/articles/{name:[a-z]+}", h)` and `chi.URLParam(r, "name")` +- Add `Method` and `MethodFunc` to `chi.Router` to allow routing definitions such as + `r.Method("GET", "/", h)` which provides a cleaner interface for custom handlers like + in `_examples/custom-handler` +- Deprecating `mux#FileServer` helper function. Instead, we encourage users to create their + own using file handler with the stdlib, see `_examples/fileserver` for an example +- Add support for LINK/UNLINK http methods via `r.Method()` and `r.MethodFunc()` +- Moved the chi project to its own organization, to allow chi-related community packages to + be easily discovered and supported, at: https://github.com/go-chi +- *NOTE:* please update your import paths to `"github.com/go-chi/chi"` +- *NOTE:* chi v2 is still available at https://github.com/go-chi/chi/tree/v2 + + +## v2.1.0 (2017-03-30) + +- Minor improvements and update to the chi core library +- Introduced a brand new `chi/render` sub-package to complete the story of building + APIs to offer a pattern for managing well-defined request / response payloads. Please + check out the updated `_examples/rest` example for how it works. +- Added `MethodNotAllowed(h http.HandlerFunc)` to chi.Router interface + + +## v2.0.0 (2017-01-06) + +- After many months of v2 being in an RC state with many companies and users running it in + production, the inclusion of some improvements to the middlewares, we are very pleased to + announce v2.0.0 of chi. + + +## v2.0.0-rc1 (2016-07-26) + +- Huge update! chi v2 is a large refactor targeting Go 1.7+. As of Go 1.7, the popular + community `"net/context"` package has been included in the standard library as `"context"` and + utilized by `"net/http"` and `http.Request` to managing deadlines, cancelation signals and other + request-scoped values. We're very excited about the new context addition and are proud to + introduce chi v2, a minimal and powerful routing package for building large HTTP services, + with zero external dependencies. Chi focuses on idiomatic design and encourages the use of + stdlib HTTP handlers and middlewares. 
+- chi v2 deprecates its `chi.Handler` interface and requires `http.Handler` or `http.HandlerFunc` +- chi v2 stores URL routing parameters and patterns in the standard request context: `r.Context()` +- chi v2 lower-level routing context is accessible by `chi.RouteContext(r.Context()) *chi.Context`, + which provides direct access to URL routing parameters, the routing path and the matching + routing patterns. +- Users upgrading from chi v1 to v2, need to: + 1. Update the old chi.Handler signature, `func(ctx context.Context, w http.ResponseWriter, r *http.Request)` to + the standard http.Handler: `func(w http.ResponseWriter, r *http.Request)` + 2. Use `chi.URLParam(r *http.Request, paramKey string) string` + or `URLParamFromCtx(ctx context.Context, paramKey string) string` to access a url parameter value + + +## v1.0.0 (2016-07-01) + +- Released chi v1 stable https://github.com/go-chi/chi/tree/v1.0.0 for Go 1.6 and older. + + +## v0.9.0 (2016-03-31) + +- Reuse context objects via sync.Pool for zero-allocation routing [#33](https://github.com/go-chi/chi/pull/33) +- BREAKING NOTE: due to subtle API changes, previously `chi.URLParams(ctx)["id"]` used to access url parameters + has changed to: `chi.URLParam(ctx, "id")` diff --git a/vendor/github.com/go-chi/chi/v5/CONTRIBUTING.md b/vendor/github.com/go-chi/chi/v5/CONTRIBUTING.md new file mode 100644 index 0000000000..b4a6268d57 --- /dev/null +++ b/vendor/github.com/go-chi/chi/v5/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing + +## Prerequisites + +1. [Install Go][go-install]. +2. Download the sources and switch the working directory: + + ```bash + go get -u -d github.com/go-chi/chi + cd $GOPATH/src/github.com/go-chi/chi + ``` + +## Submitting a Pull Request + +A typical workflow is: + +1. [Fork the repository.][fork] +2. [Create a topic branch.][branch] +3. Add tests for your change. +4. Run `go test`. If your tests pass, return to the step 3. +5. Implement the change and ensure the steps from the previous step pass. +6. Run `goimports -w .`, to ensure the new code conforms to Go formatting guideline. +7. [Add, commit and push your changes.][git-help] +8. [Submit a pull request.][pull-req] + +[go-install]: https://golang.org/doc/install +[fork]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/fork-a-repo +[branch]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-branches +[git-help]: https://docs.github.com/en +[pull-req]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-pull-requests + diff --git a/vendor/github.com/go-chi/chi/v5/LICENSE b/vendor/github.com/go-chi/chi/v5/LICENSE new file mode 100644 index 0000000000..d99f02ffac --- /dev/null +++ b/vendor/github.com/go-chi/chi/v5/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2015-present Peter Kieltyka (https://github.com/pkieltyka), Google Inc. 
+ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/go-chi/chi/v5/Makefile b/vendor/github.com/go-chi/chi/v5/Makefile new file mode 100644 index 0000000000..e0f18c7da2 --- /dev/null +++ b/vendor/github.com/go-chi/chi/v5/Makefile @@ -0,0 +1,22 @@ +.PHONY: all +all: + @echo "**********************************************************" + @echo "** chi build tool **" + @echo "**********************************************************" + + +.PHONY: test +test: + go clean -testcache && $(MAKE) test-router && $(MAKE) test-middleware + +.PHONY: test-router +test-router: + go test -race -v . + +.PHONY: test-middleware +test-middleware: + go test -race -v ./middleware + +.PHONY: docs +docs: + npx docsify-cli serve ./docs diff --git a/vendor/github.com/go-chi/chi/v5/README.md b/vendor/github.com/go-chi/chi/v5/README.md new file mode 100644 index 0000000000..c58a0e20ce --- /dev/null +++ b/vendor/github.com/go-chi/chi/v5/README.md @@ -0,0 +1,505 @@ +# chi + + +[![GoDoc Widget]][GoDoc] + +`chi` is a lightweight, idiomatic and composable router for building Go HTTP services. It's +especially good at helping you write large REST API services that are kept maintainable as your +project grows and changes. `chi` is built on the new `context` package introduced in Go 1.7 to +handle signaling, cancelation and request-scoped values across a handler chain. + +The focus of the project has been to seek out an elegant and comfortable design for writing +REST API servers, written during the development of the Pressly API service that powers our +public API service, which in turn powers all of our client-side applications. + +The key considerations of chi's design are: project structure, maintainability, standard http +handlers (stdlib-only), developer productivity, and deconstructing a large system into many small +parts. The core router `github.com/go-chi/chi` is quite small (less than 1000 LOC), but we've also +included some useful/optional subpackages: [middleware](/middleware), [render](https://github.com/go-chi/render) +and [docgen](https://github.com/go-chi/docgen). We hope you enjoy it too! 
+
+## Install
+
+```sh
+go get -u github.com/go-chi/chi/v5
+```
+
+
+## Features
+
+* **Lightweight** - cloc'd in ~1000 LOC for the chi router
+* **Fast** - yes, see [benchmarks](#benchmarks)
+* **100% compatible with net/http** - use any http or middleware pkg in the ecosystem that is also compatible with `net/http`
+* **Designed for modular/composable APIs** - middlewares, inline middlewares, route groups and sub-router mounting
+* **Context control** - built on the new `context` package, providing value chaining, cancellations and timeouts
+* **Robust** - in production at Pressly, Cloudflare, Heroku, 99Designs, and many others (see [discussion](https://github.com/go-chi/chi/issues/91))
+* **Doc generation** - `docgen` auto-generates routing documentation from your source to JSON or Markdown
+* **Go.mod support** - as of v5 (see [CHANGELOG](https://github.com/go-chi/chi/blob/master/CHANGELOG.md))
+* **No external dependencies** - plain ol' Go stdlib + net/http
+
+
+## Examples
+
+See [_examples/](https://github.com/go-chi/chi/blob/master/_examples/) for a variety of examples.
+
+
+**As easy as:**
+
+```go
+package main
+
+import (
+	"net/http"
+
+	"github.com/go-chi/chi/v5"
+	"github.com/go-chi/chi/v5/middleware"
+)
+
+func main() {
+	r := chi.NewRouter()
+	r.Use(middleware.Logger)
+	r.Get("/", func(w http.ResponseWriter, r *http.Request) {
+		w.Write([]byte("welcome"))
+	})
+	http.ListenAndServe(":3000", r)
+}
+```
+
+**REST Preview:**
+
+Here is a little preview of what routing looks like with chi. Also take a look at the generated routing docs
+in JSON ([routes.json](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.json)) and in
+Markdown ([routes.md](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.md)).
+
+I highly recommend reading the source of the [examples](https://github.com/go-chi/chi/blob/master/_examples/) listed
+above; they will show you all the features of chi and serve as a good form of documentation.
+
+```go
+import (
+	//...
+	"context"
+	"github.com/go-chi/chi/v5"
+	"github.com/go-chi/chi/v5/middleware"
+)
+
+func main() {
+	r := chi.NewRouter()
+
+	// A good base middleware stack
+	r.Use(middleware.RequestID)
+	r.Use(middleware.RealIP)
+	r.Use(middleware.Logger)
+	r.Use(middleware.Recoverer)
+
+	// Set a timeout value on the request context (ctx), that will signal
+	// through ctx.Done() that the request has timed out and further
+	// processing should be stopped.
+	r.Use(middleware.Timeout(60 * time.Second))
+
+	r.Get("/", func(w http.ResponseWriter, r *http.Request) {
+		w.Write([]byte("hi"))
+	})
+
+	// RESTy routes for "articles" resource
+	r.Route("/articles", func(r chi.Router) {
+		r.With(paginate).Get("/", listArticles)                           // GET /articles
+		r.With(paginate).Get("/{month}-{day}-{year}", listArticlesByDate) // GET /articles/01-16-2017
+
+		r.Post("/", createArticle)       // POST /articles
+		r.Get("/search", searchArticles) // GET /articles/search
+
+		// Regexp url parameters:
+		r.Get("/{articleSlug:[a-z-]+}", getArticleBySlug) // GET /articles/home-is-toronto
+
+		// Subrouters:
+		r.Route("/{articleID}", func(r chi.Router) {
+			r.Use(ArticleCtx)
+			r.Get("/", getArticle)       // GET /articles/123
+			r.Put("/", updateArticle)    // PUT /articles/123
+			r.Delete("/", deleteArticle) // DELETE /articles/123
+		})
+	})
+
+	// Mount the admin sub-router
+	r.Mount("/admin", adminRouter())
+
+	http.ListenAndServe(":3333", r)
+}
+
+func ArticleCtx(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		articleID := chi.URLParam(r, "articleID")
+		article, err := dbGetArticle(articleID)
+		if err != nil {
+			http.Error(w, http.StatusText(404), 404)
+			return
+		}
+		ctx := context.WithValue(r.Context(), "article", article)
+		next.ServeHTTP(w, r.WithContext(ctx))
+	})
+}
+
+func getArticle(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+	article, ok := ctx.Value("article").(*Article)
+	if !ok {
+		http.Error(w, http.StatusText(422), 422)
+		return
+	}
+	w.Write([]byte(fmt.Sprintf("title:%s", article.Title)))
+}
+
+// A completely separate router for administrator routes
+func adminRouter() http.Handler {
+	r := chi.NewRouter()
+	r.Use(AdminOnly)
+	r.Get("/", adminIndex)
+	r.Get("/accounts", adminListAccounts)
+	return r
+}
+
+func AdminOnly(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		ctx := r.Context()
+		perm, ok := ctx.Value("acl.permission").(YourPermissionType)
+		if !ok || !perm.IsAdmin() {
+			http.Error(w, http.StatusText(403), 403)
+			return
+		}
+		next.ServeHTTP(w, r)
+	})
+}
+```
+
+
+## Router interface
+
+chi's router is based on a kind of [Patricia Radix trie](https://en.wikipedia.org/wiki/Radix_tree).
+The router is fully compatible with `net/http`.
+
+Built on top of the tree is the `Router` interface:
+
+```go
+// Router consisting of the core routing methods used by chi's Mux,
+// using only the standard net/http.
+type Router interface {
+	http.Handler
+	Routes
+
+	// Use appends one or more middlewares onto the Router stack.
+	Use(middlewares ...func(http.Handler) http.Handler)
+
+	// With adds inline middlewares for an endpoint handler.
+	With(middlewares ...func(http.Handler) http.Handler) Router
+
+	// Group adds a new inline-Router along the current routing
+	// path, with a fresh middleware stack for the inline-Router.
+	Group(fn func(r Router)) Router
+
+	// Route mounts a sub-Router along a `pattern` string.
+	Route(pattern string, fn func(r Router)) Router
+
+	// Mount attaches another http.Handler along ./pattern/*
+	Mount(pattern string, h http.Handler)
+
+	// Handle and HandleFunc add routes for `pattern` that matches
+	// all HTTP methods.
+	Handle(pattern string, h http.Handler)
+	HandleFunc(pattern string, h http.HandlerFunc)
+
+	// Method and MethodFunc add routes for `pattern` that matches
+	// the `method` HTTP method.
+	Method(method, pattern string, h http.Handler)
+	MethodFunc(method, pattern string, h http.HandlerFunc)
+
+	// HTTP-method routing along `pattern`
+	Connect(pattern string, h http.HandlerFunc)
+	Delete(pattern string, h http.HandlerFunc)
+	Get(pattern string, h http.HandlerFunc)
+	Head(pattern string, h http.HandlerFunc)
+	Options(pattern string, h http.HandlerFunc)
+	Patch(pattern string, h http.HandlerFunc)
+	Post(pattern string, h http.HandlerFunc)
+	Put(pattern string, h http.HandlerFunc)
+	Trace(pattern string, h http.HandlerFunc)
+
+	// NotFound defines a handler to respond whenever a route could
+	// not be found.
+	NotFound(h http.HandlerFunc)
+
+	// MethodNotAllowed defines a handler to respond whenever a method is
+	// not allowed.
+	MethodNotAllowed(h http.HandlerFunc)
+}
+
+// Routes interface adds methods for router traversal, which are also
+// used by the github.com/go-chi/docgen package to generate documentation for Routers.
+type Routes interface {
+	// Routes returns the routing tree in an easily traversable structure.
+	Routes() []Route
+
+	// Middlewares returns the list of middlewares in use by the router.
+	Middlewares() Middlewares
+
+	// Match searches the routing tree for a handler that matches
+	// the method/path - similar to routing a http request, but without
+	// executing the handler thereafter.
+	Match(rctx *Context, method, path string) bool
+}
+```
+
+Each routing method accepts a URL `pattern` and a chain of `handlers`. The URL pattern
+supports named params (i.e. `/users/{userID}`) and wildcards (i.e. `/admin/*`). URL parameters
+can be fetched at runtime by calling `chi.URLParam(r, "userID")` for named parameters
+and `chi.URLParam(r, "*")` for a wildcard parameter.
+
+
+### Middleware handlers
+
+chi's middlewares are just stdlib net/http middleware handlers. There is nothing special
+about them, which means the router and all the tooling are designed to be compatible and
+friendly with any middleware in the community. This offers much better extensibility and reuse
+of packages and is at the heart of chi's purpose.
+
+Here is an example of a standard net/http middleware where we assign a context key `"user"`
+the value of `"123"`. This middleware sets a hypothetical user identifier on the request
+context and calls the next handler in the chain.
+
+```go
+// HTTP middleware setting a value on the request context
+func MyMiddleware(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		// create new context from `r` request context, and assign key `"user"`
+		// to value of `"123"`
+		ctx := context.WithValue(r.Context(), "user", "123")
+
+		// call the next handler in the chain, passing the response writer and
+		// the updated request object with the new context value.
+		//
+		// note: context.Context values are nested, so any previously set
+		// values will be accessible as well, and the new `"user"` key
+		// will be accessible from this point forward.
+		next.ServeHTTP(w, r.WithContext(ctx))
+	})
+}
+```
+
+
+### Request handlers
+
+chi uses standard net/http request handlers. This little snippet is an example of a http.Handler
+func that reads a user identifier from the request context - hypothetically, identifying
+the user sending an authenticated request, validated+set by a previous middleware handler.
+
+```go
+// HTTP handler accessing data from the request context.
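+// Note: the single-value type assertion below will panic if the "user" key
+// is absent; outside a short example like this, the two-value form is the
+// safer sketch, e.g.:
+//
+//	user, ok := r.Context().Value("user").(string)
+//	if !ok { /* respond with an error instead of panicking */ }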
+func MyRequestHandler(w http.ResponseWriter, r *http.Request) {
+	// here we read from the request context and fetch out `"user"` key set in
+	// the MyMiddleware example above.
+	user := r.Context().Value("user").(string)
+
+	// respond to the client
+	w.Write([]byte(fmt.Sprintf("hi %s", user)))
+}
+```
+
+
+### URL parameters
+
+chi's router parses and stores URL parameters right onto the request context. Here is
+an example of how to access URL params in your net/http handlers. And of course, middlewares
+are able to access the same information.
+
+```go
+// HTTP handler accessing the url routing parameters.
+func MyRequestHandler(w http.ResponseWriter, r *http.Request) {
+	// fetch the url parameter `"userID"` from the request of a matching
+	// routing pattern. An example routing pattern could be: /users/{userID}
+	userID := chi.URLParam(r, "userID")
+
+	// fetch `"key"` from the request context
+	ctx := r.Context()
+	key := ctx.Value("key").(string)
+
+	// respond to the client
+	w.Write([]byte(fmt.Sprintf("hi %v, %v", userID, key)))
+}
+```
+
+
+## Middlewares
+
+chi comes equipped with an optional `middleware` package, providing a suite of standard
+`net/http` middlewares. Please note that any middleware in the ecosystem that is also compatible
+with `net/http` can be used with chi's mux.
+
+### Core middlewares
+
+----------------------------------------------------------------------------------------------------
+| chi/middleware Handler | description                                                              |
+| :--------------------- | :----------------------------------------------------------------------- |
+| [AllowContentEncoding] | Enforces a whitelist of request Content-Encoding headers                 |
+| [AllowContentType]     | Explicit whitelist of accepted request Content-Types                     |
+| [BasicAuth]            | Basic HTTP authentication                                                |
+| [Compress]             | Gzip compression for clients that accept compressed responses           |
+| [ContentCharset]       | Ensure charset for Content-Type request headers                          |
+| [CleanPath]            | Clean double slashes from request path                                   |
+| [GetHead]              | Automatically route undefined HEAD requests to GET handlers              |
+| [Heartbeat]            | Monitoring endpoint to check the server's pulse                          |
+| [Logger]               | Logs the start and end of each request with the elapsed processing time  |
+| [NoCache]              | Sets response headers to prevent clients from caching                    |
+| [Profiler]             | Easily attach net/http/pprof to your routers                             |
+| [RealIP]               | Sets a http.Request's RemoteAddr to either X-Real-IP or X-Forwarded-For  |
+| [Recoverer]            | Gracefully absorbs panics and prints the stack trace                     |
+| [RequestID]            | Injects a request ID into the context of each request                    |
+| [RedirectSlashes]      | Redirect slashes on routing paths                                        |
+| [RouteHeaders]         | Route handling for request headers                                       |
+| [SetHeader]            | Short-hand middleware to set a response header key/value                 |
+| [StripSlashes]         | Strip slashes on routing paths                                           |
+| [Sunset]               | Sets Deprecation/Sunset headers on the response                          |
+| [Throttle]             | Puts a ceiling on the number of concurrent requests                      |
+| [Timeout]              | Signals to the request context when the timeout deadline is reached      |
+| [URLFormat]            | Parse extension from url and put it on request context                   |
+| [WithValue]            | Short-hand middleware to set a key/value on the request context          |
+----------------------------------------------------------------------------------------------------
+
+[AllowContentEncoding]: https://pkg.go.dev/github.com/go-chi/chi/middleware#AllowContentEncoding
+[AllowContentType]: https://pkg.go.dev/github.com/go-chi/chi/middleware#AllowContentType
+[BasicAuth]:
https://pkg.go.dev/github.com/go-chi/chi/middleware#BasicAuth +[Compress]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Compress +[ContentCharset]: https://pkg.go.dev/github.com/go-chi/chi/middleware#ContentCharset +[CleanPath]: https://pkg.go.dev/github.com/go-chi/chi/middleware#CleanPath +[GetHead]: https://pkg.go.dev/github.com/go-chi/chi/middleware#GetHead +[GetReqID]: https://pkg.go.dev/github.com/go-chi/chi/middleware#GetReqID +[Heartbeat]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Heartbeat +[Logger]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Logger +[NoCache]: https://pkg.go.dev/github.com/go-chi/chi/middleware#NoCache +[Profiler]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Profiler +[RealIP]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RealIP +[Recoverer]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Recoverer +[RedirectSlashes]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RedirectSlashes +[RequestLogger]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RequestLogger +[RequestID]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RequestID +[RouteHeaders]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RouteHeaders +[SetHeader]: https://pkg.go.dev/github.com/go-chi/chi/middleware#SetHeader +[StripSlashes]: https://pkg.go.dev/github.com/go-chi/chi/middleware#StripSlashes +[Sunset]: https://pkg.go.dev/github.com/go-chi/chi/v5/middleware#Sunset +[Throttle]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Throttle +[ThrottleBacklog]: https://pkg.go.dev/github.com/go-chi/chi/middleware#ThrottleBacklog +[ThrottleWithOpts]: https://pkg.go.dev/github.com/go-chi/chi/middleware#ThrottleWithOpts +[Timeout]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Timeout +[URLFormat]: https://pkg.go.dev/github.com/go-chi/chi/middleware#URLFormat +[WithLogEntry]: https://pkg.go.dev/github.com/go-chi/chi/middleware#WithLogEntry +[WithValue]: https://pkg.go.dev/github.com/go-chi/chi/middleware#WithValue +[Compressor]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Compressor +[DefaultLogFormatter]: https://pkg.go.dev/github.com/go-chi/chi/middleware#DefaultLogFormatter +[EncoderFunc]: https://pkg.go.dev/github.com/go-chi/chi/middleware#EncoderFunc +[HeaderRoute]: https://pkg.go.dev/github.com/go-chi/chi/middleware#HeaderRoute +[HeaderRouter]: https://pkg.go.dev/github.com/go-chi/chi/middleware#HeaderRouter +[LogEntry]: https://pkg.go.dev/github.com/go-chi/chi/middleware#LogEntry +[LogFormatter]: https://pkg.go.dev/github.com/go-chi/chi/middleware#LogFormatter +[LoggerInterface]: https://pkg.go.dev/github.com/go-chi/chi/middleware#LoggerInterface +[ThrottleOpts]: https://pkg.go.dev/github.com/go-chi/chi/middleware#ThrottleOpts +[WrapResponseWriter]: https://pkg.go.dev/github.com/go-chi/chi/middleware#WrapResponseWriter + +### Extra middlewares & packages + +Please see https://github.com/go-chi for additional packages. 
+
+--------------------------------------------------------------------------------------------------------------------
+| package                                             | description                                                 |
+|:----------------------------------------------------|:------------------------------------------------------------
+| [cors](https://github.com/go-chi/cors)              | Cross-origin resource sharing (CORS)                        |
+| [docgen](https://github.com/go-chi/docgen)          | Print chi.Router routes at runtime                          |
+| [jwtauth](https://github.com/go-chi/jwtauth)        | JWT authentication                                          |
+| [hostrouter](https://github.com/go-chi/hostrouter)  | Domain/host based request routing                           |
+| [httplog](https://github.com/go-chi/httplog)        | Small but powerful structured HTTP request logging          |
+| [httprate](https://github.com/go-chi/httprate)      | HTTP request rate limiter                                   |
+| [httptracer](https://github.com/go-chi/httptracer)  | HTTP request performance tracing library                    |
+| [httpvcr](https://github.com/go-chi/httpvcr)        | Write deterministic tests for external sources              |
+| [stampede](https://github.com/go-chi/stampede)      | HTTP request coalescer                                      |
+--------------------------------------------------------------------------------------------------------------------
+
+
+## context?
+
+`context` is a tiny pkg that provides a simple interface to signal context across call stacks
+and goroutines. It was originally written by [Sameer Ajmani](https://github.com/Sajmani)
+and has been available in the stdlib since go1.7.
+
+Learn more at https://blog.golang.org/context
+
+and:
+* Docs: https://golang.org/pkg/context
+* Source: https://github.com/golang/go/tree/master/src/context
+
+
+## Benchmarks
+
+The benchmark suite: https://github.com/pkieltyka/go-http-routing-benchmark
+
+Results as of Nov 29, 2020 with Go 1.15.5 on Linux AMD 3950x
+
+```shell
+BenchmarkChi_Param            3075895    384 ns/op      400 B/op    2 allocs/op
+BenchmarkChi_Param5           2116603    566 ns/op      400 B/op    2 allocs/op
+BenchmarkChi_Param20           964117   1227 ns/op      400 B/op    2 allocs/op
+BenchmarkChi_ParamWrite       2863413    420 ns/op      400 B/op    2 allocs/op
+BenchmarkChi_GithubStatic     3045488    395 ns/op      400 B/op    2 allocs/op
+BenchmarkChi_GithubParam      2204115    540 ns/op      400 B/op    2 allocs/op
+BenchmarkChi_GithubAll          10000 113811 ns/op    81203 B/op  406 allocs/op
+BenchmarkChi_GPlusStatic      3337485    359 ns/op      400 B/op    2 allocs/op
+BenchmarkChi_GPlusParam       2825853    423 ns/op      400 B/op    2 allocs/op
+BenchmarkChi_GPlus2Params     2471697    483 ns/op      400 B/op    2 allocs/op
+BenchmarkChi_GPlusAll          194220   5950 ns/op     5200 B/op   26 allocs/op
+BenchmarkChi_ParseStatic      3365324    356 ns/op      400 B/op    2 allocs/op
+BenchmarkChi_ParseParam       2976614    404 ns/op      400 B/op    2 allocs/op
+BenchmarkChi_Parse2Params     2638084    439 ns/op      400 B/op    2 allocs/op
+BenchmarkChi_ParseAll          109567  11295 ns/op    10400 B/op   52 allocs/op
+BenchmarkChi_StaticAll          16846  71308 ns/op    62802 B/op  314 allocs/op
+```
+
+Comparison with other routers: https://gist.github.com/pkieltyka/123032f12052520aaccab752bd3e78cc
+
+NOTE: the allocs in the benchmark above are from the calls to http.Request's
+`WithContext(context.Context)` method that clones the http.Request, sets the `Context()`
+on the duplicated (alloc'd) request, and returns the new request object. This is just
+how setting a context on a request works in Go.
+
+
+## Credits
+
+* Carl Jackson for https://github.com/zenazn/goji
+  * Parts of chi's thinking come from goji, and chi's middleware package
+    sources from [goji](https://github.com/zenazn/goji/tree/master/web/middleware).
+  * Please see goji's [LICENSE](https://github.com/zenazn/goji/blob/master/LICENSE) (MIT)
+* Armon Dadgar for https://github.com/armon/go-radix
+* Contributions: [@VojtechVitek](https://github.com/VojtechVitek)
+
+We'll be more than happy to see [your contributions](./CONTRIBUTING.md)!
+
+
+## Beyond REST
+
+chi is just an HTTP router that lets you decompose request handling into many smaller layers.
+Many companies use chi to write REST services for their public APIs. But REST is just a convention
+for managing state via HTTP, and there are a lot of other pieces required to write a complete client-server
+system or network of microservices.
+
+Looking beyond REST, I also recommend some newer works in the field:
+* [webrpc](https://github.com/webrpc/webrpc) - Web-focused RPC client+server framework with code-gen
+* [gRPC](https://github.com/grpc/grpc-go) - Google's RPC framework via protobufs
+* [graphql](https://github.com/99designs/gqlgen) - Declarative query language
+* [NATS](https://nats.io) - lightweight pub-sub
+
+
+## License
+
+Copyright (c) 2015-present [Peter Kieltyka](https://github.com/pkieltyka)
+
+Licensed under [MIT License](./LICENSE)
+
+[GoDoc]: https://pkg.go.dev/github.com/go-chi/chi/v5
+[GoDoc Widget]: https://godoc.org/github.com/go-chi/chi?status.svg
diff --git a/vendor/github.com/go-chi/chi/v5/SECURITY.md b/vendor/github.com/go-chi/chi/v5/SECURITY.md
new file mode 100644
index 0000000000..7e937f87f3
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/SECURITY.md
@@ -0,0 +1,5 @@
+# Reporting Security Issues
+
+We appreciate your efforts to responsibly disclose your findings, and will make every effort to acknowledge your contributions.
+
+To report a security issue, please use the GitHub Security Advisory ["Report a Vulnerability"](https://github.com/go-chi/chi/security/advisories/new) tab.
diff --git a/vendor/github.com/go-chi/chi/v5/chain.go b/vendor/github.com/go-chi/chi/v5/chain.go
new file mode 100644
index 0000000000..a2278414f4
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/chain.go
@@ -0,0 +1,49 @@
+package chi
+
+import "net/http"
+
+// Chain returns a Middlewares type from a slice of middleware handlers.
+func Chain(middlewares ...func(http.Handler) http.Handler) Middlewares {
+	return Middlewares(middlewares)
+}
+
+// Handler builds and returns a http.Handler from the chain of middlewares,
+// with `h http.Handler` as the final handler.
+func (mws Middlewares) Handler(h http.Handler) http.Handler {
+	return &ChainHandler{h, chain(mws, h), mws}
+}
+
+// HandlerFunc builds and returns a http.Handler from the chain of middlewares,
+// with `h http.HandlerFunc` as the final handler.
+func (mws Middlewares) HandlerFunc(h http.HandlerFunc) http.Handler {
+	return &ChainHandler{h, chain(mws, h), mws}
+}
+
+// ChainHandler is a http.Handler with support for handler composition and
+// execution.
+type ChainHandler struct {
+	Endpoint    http.Handler
+	chain       http.Handler
+	Middlewares Middlewares
+}
+
+func (c *ChainHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	c.chain.ServeHTTP(w, r)
+}
+
+// chain builds a http.Handler composed of an inline middleware stack and endpoint
+// handler in the order they are passed.
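+//
+// For example, chain([]func(http.Handler) http.Handler{m1, m2}, h)
+// returns m1(m2(h)): a request flows through m1 first, then m2, and
+// finally reaches the endpoint h.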
+func chain(middlewares []func(http.Handler) http.Handler, endpoint http.Handler) http.Handler {
+	// Return ahead of time if there aren't any middlewares for the chain
+	if len(middlewares) == 0 {
+		return endpoint
+	}
+
+	// Wrap the end handler with the middleware chain
+	h := middlewares[len(middlewares)-1](endpoint)
+	for i := len(middlewares) - 2; i >= 0; i-- {
+		h = middlewares[i](h)
+	}
+
+	return h
+}
diff --git a/vendor/github.com/go-chi/chi/v5/chi.go b/vendor/github.com/go-chi/chi/v5/chi.go
new file mode 100644
index 0000000000..2b6ebd337c
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/chi.go
@@ -0,0 +1,137 @@
+// Package chi is a small, idiomatic and composable router for building HTTP services.
+//
+// chi requires Go 1.14 or newer.
+//
+// Example:
+//
+//	package main
+//
+//	import (
+//		"net/http"
+//
+//		"github.com/go-chi/chi/v5"
+//		"github.com/go-chi/chi/v5/middleware"
+//	)
+//
+//	func main() {
+//		r := chi.NewRouter()
+//		r.Use(middleware.Logger)
+//		r.Use(middleware.Recoverer)
+//
+//		r.Get("/", func(w http.ResponseWriter, r *http.Request) {
+//			w.Write([]byte("root."))
+//		})
+//
+//		http.ListenAndServe(":3333", r)
+//	}
+//
+// See github.com/go-chi/chi/_examples/ for more in-depth examples.
+//
+// URL patterns allow for easy matching of path components in HTTP
+// requests. The matching components can then be accessed using
+// chi.URLParam(). All patterns must begin with a slash.
+//
+// A simple named placeholder {name} matches any sequence of characters
+// up to the next / or the end of the URL. Trailing slashes on paths must
+// be handled explicitly.
+//
+// A placeholder with a name followed by a colon allows a regular
+// expression match, for example {number:\\d+}. The regular expression
+// syntax is Go's normal regexp RE2 syntax, except that / will never be
+// matched. An anonymous regexp pattern is allowed, using an empty string
+// before the colon in the placeholder, such as {:\\d+}.
+//
+// The special placeholder of asterisk matches the rest of the requested
+// URL. Any trailing characters in the pattern are ignored. This is the only
+// placeholder which will match / characters.
+//
+// Examples:
+//
+//	"/user/{name}" matches "/user/jsmith" but not "/user/jsmith/info" or "/user/jsmith/"
+//	"/user/{name}/info" matches "/user/jsmith/info"
+//	"/page/*" matches "/page/intro/latest"
+//	"/page/{other}/latest" also matches "/page/intro/latest"
+//	"/date/{yyyy:\\d\\d\\d\\d}/{mm:\\d\\d}/{dd:\\d\\d}" matches "/date/2017/04/01"
+package chi
+
+import "net/http"
+
+// NewRouter returns a new Mux object that implements the Router interface.
+func NewRouter() *Mux {
+	return NewMux()
+}
+
+// Router consisting of the core routing methods used by chi's Mux,
+// using only the standard net/http.
+type Router interface {
+	http.Handler
+	Routes
+
+	// Use appends one or more middlewares onto the Router stack.
+	Use(middlewares ...func(http.Handler) http.Handler)
+
+	// With adds inline middlewares for an endpoint handler.
+	With(middlewares ...func(http.Handler) http.Handler) Router
+
+	// Group adds a new inline-Router along the current routing
+	// path, with a fresh middleware stack for the inline-Router.
+	Group(fn func(r Router)) Router
+
+	// Route mounts a sub-Router along a `pattern` string.
+	Route(pattern string, fn func(r Router)) Router
+
+	// Mount attaches another http.Handler along ./pattern/*
+	Mount(pattern string, h http.Handler)
+
+	// Handle and HandleFunc add routes for `pattern` that matches
+	// all HTTP methods.
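+	// The pattern may also embed a method itself, e.g. r.Handle("GET /ping", h)
+	// behaves like r.Method("GET", "/ping", h) (see Mux.Handle below).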
+	Handle(pattern string, h http.Handler)
+	HandleFunc(pattern string, h http.HandlerFunc)
+
+	// Method and MethodFunc add routes for `pattern` that matches
+	// the `method` HTTP method.
+	Method(method, pattern string, h http.Handler)
+	MethodFunc(method, pattern string, h http.HandlerFunc)
+
+	// HTTP-method routing along `pattern`
+	Connect(pattern string, h http.HandlerFunc)
+	Delete(pattern string, h http.HandlerFunc)
+	Get(pattern string, h http.HandlerFunc)
+	Head(pattern string, h http.HandlerFunc)
+	Options(pattern string, h http.HandlerFunc)
+	Patch(pattern string, h http.HandlerFunc)
+	Post(pattern string, h http.HandlerFunc)
+	Put(pattern string, h http.HandlerFunc)
+	Trace(pattern string, h http.HandlerFunc)
+
+	// NotFound defines a handler to respond whenever a route could
+	// not be found.
+	NotFound(h http.HandlerFunc)
+
+	// MethodNotAllowed defines a handler to respond whenever a method is
+	// not allowed.
+	MethodNotAllowed(h http.HandlerFunc)
+}
+
+// Routes interface adds methods for router traversal, which are also
+// used by the `docgen` subpackage to generate documentation for Routers.
+type Routes interface {
+	// Routes returns the routing tree in an easily traversable structure.
+	Routes() []Route
+
+	// Middlewares returns the list of middlewares in use by the router.
+	Middlewares() Middlewares
+
+	// Match searches the routing tree for a handler that matches
+	// the method/path - similar to routing a http request, but without
+	// executing the handler thereafter.
+	Match(rctx *Context, method, path string) bool
+
+	// Find searches the routing tree for the pattern that matches
+	// the method/path.
+	Find(rctx *Context, method, path string) string
+}
+
+// Middlewares type is a slice of standard middleware handlers with methods
+// to compose middleware chains and http.Handlers.
+type Middlewares []func(http.Handler) http.Handler
diff --git a/vendor/github.com/go-chi/chi/v5/context.go b/vendor/github.com/go-chi/chi/v5/context.go
new file mode 100644
index 0000000000..aacf6eff72
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/context.go
@@ -0,0 +1,165 @@
+package chi
+
+import (
+	"context"
+	"net/http"
+	"strings"
+)
+
+// URLParam returns the url parameter from a http.Request object.
+func URLParam(r *http.Request, key string) string {
+	if rctx := RouteContext(r.Context()); rctx != nil {
+		return rctx.URLParam(key)
+	}
+	return ""
+}
+
+// URLParamFromCtx returns the url parameter from a http.Request Context.
+func URLParamFromCtx(ctx context.Context, key string) string {
+	if rctx := RouteContext(ctx); rctx != nil {
+		return rctx.URLParam(key)
+	}
+	return ""
+}
+
+// RouteContext returns chi's routing Context object from a
+// http.Request Context.
+func RouteContext(ctx context.Context) *Context {
+	val, _ := ctx.Value(RouteCtxKey).(*Context)
+	return val
+}
+
+// NewRouteContext returns a new routing Context object.
+func NewRouteContext() *Context {
+	return &Context{}
+}
+
+var (
+	// RouteCtxKey is the context.Context key to store the request context.
+	RouteCtxKey = &contextKey{"RouteContext"}
+)
+
+// Context is the default routing context set on the root node of a
+// request context to track route patterns, URL parameters and
+// an optional routing path.
+type Context struct {
+	Routes Routes
+
+	// parentCtx is the parent of this one, for using Context as a
+	// context.Context directly. This is an optimization that saves
+	// 1 allocation.
+	parentCtx context.Context
+
+	// Routing path/method override used during the route search.
+	// See Mux#routeHTTP method.
+	RoutePath   string
+	RouteMethod string
+
+	// URLParams are the stack of routeParams captured during the
+	// routing lifecycle across a stack of sub-routers.
+	URLParams RouteParams
+
+	// Route parameters matched for the current sub-router. It is
+	// intentionally unexported so it can't be tampered with.
+	routeParams RouteParams
+
+	// The endpoint routing pattern that matched the request URI path
+	// or `RoutePath` of the current sub-router. This value will update
+	// during the lifecycle of a request passing through a stack of
+	// sub-routers.
+	routePattern string
+
+	// Routing pattern stack throughout the lifecycle of the request,
+	// across all connected routers. It is a record of all matching
+	// patterns across a stack of sub-routers.
+	RoutePatterns []string
+
+	methodsAllowed   []methodTyp // allowed methods in case of a 405
+	methodNotAllowed bool
+}
+
+// Reset a routing context to its initial state.
+func (x *Context) Reset() {
+	x.Routes = nil
+	x.RoutePath = ""
+	x.RouteMethod = ""
+	x.RoutePatterns = x.RoutePatterns[:0]
+	x.URLParams.Keys = x.URLParams.Keys[:0]
+	x.URLParams.Values = x.URLParams.Values[:0]
+
+	x.routePattern = ""
+	x.routeParams.Keys = x.routeParams.Keys[:0]
+	x.routeParams.Values = x.routeParams.Values[:0]
+	x.methodNotAllowed = false
+	x.methodsAllowed = x.methodsAllowed[:0]
+	x.parentCtx = nil
+}
+
+// URLParam returns the corresponding URL parameter value from the request
+// routing context.
+func (x *Context) URLParam(key string) string {
+	for k := len(x.URLParams.Keys) - 1; k >= 0; k-- {
+		if x.URLParams.Keys[k] == key {
+			return x.URLParams.Values[k]
+		}
+	}
+	return ""
+}
+
+// RoutePattern builds the routing pattern string for the particular
+// request, at the particular point during routing. This means the value
+// will change throughout the execution of a request in a router. That is
+// why it's advised to only use this value after calling the next handler.
+//
+// For example,
+//
+//	func Instrument(next http.Handler) http.Handler {
+//		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+//			next.ServeHTTP(w, r)
+//			routePattern := chi.RouteContext(r.Context()).RoutePattern()
+//			measure(w, r, routePattern)
+//		})
+//	}
+func (x *Context) RoutePattern() string {
+	if x == nil {
+		return ""
+	}
+	routePattern := strings.Join(x.RoutePatterns, "")
+	routePattern = replaceWildcards(routePattern)
+	if routePattern != "/" {
+		routePattern = strings.TrimSuffix(routePattern, "//")
+		routePattern = strings.TrimSuffix(routePattern, "/")
+	}
+	return routePattern
+}
+
+// replaceWildcards takes a route pattern and recursively replaces all
+// occurrences of "/*/" with "/".
+func replaceWildcards(p string) string {
+	if strings.Contains(p, "/*/") {
+		return replaceWildcards(strings.Replace(p, "/*/", "/", -1))
+	}
+	return p
+}
+
+// RouteParams is a structure to track URL routing parameters efficiently.
+type RouteParams struct {
+	Keys, Values []string
+}
+
+// Add will append a URL parameter to the end of the route param list.
+func (s *RouteParams) Add(key, value string) {
+	s.Keys = append(s.Keys, key)
+	s.Values = append(s.Values, value)
+}
+
+// contextKey is a value for use with context.WithValue. It's used as
+// a pointer so it fits in an interface{} without allocation. This technique
+// for defining context keys was copied from Go 1.7's new use of context in net/http.
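+//
+// Because the key is a pointer to an unexported type, no other package can
+// construct a colliding key, even one built from the same name string.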
+type contextKey struct {
+	name string
+}
+
+func (k *contextKey) String() string {
+	return "chi context value " + k.name
+}
diff --git a/vendor/github.com/go-chi/chi/v5/mux.go b/vendor/github.com/go-chi/chi/v5/mux.go
new file mode 100644
index 0000000000..f1266971b4
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/mux.go
@@ -0,0 +1,527 @@
+package chi
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"strings"
+	"sync"
+)
+
+var _ Router = &Mux{}
+
+// Mux is a simple HTTP route multiplexer that parses a request path,
+// records any URL params, and executes an end handler. It implements
+// the http.Handler interface and is friendly with the standard library.
+//
+// Mux is designed to be fast, minimal and offer a powerful API for building
+// modular and composable HTTP services with a large set of handlers. It's
+// particularly useful for writing large REST API services that break a handler
+// into many smaller parts composed of middlewares and end handlers.
+type Mux struct {
+	// The computed mux handler made of the chained middleware stack and
+	// the tree router
+	handler http.Handler
+
+	// The radix trie router
+	tree *node
+
+	// Custom method not allowed handler
+	methodNotAllowedHandler http.HandlerFunc
+
+	// A reference to the parent mux used by subrouters when mounting
+	// to a parent mux
+	parent *Mux
+
+	// Routing context pool
+	pool *sync.Pool
+
+	// Custom route not found handler
+	notFoundHandler http.HandlerFunc
+
+	// The middleware stack
+	middlewares []func(http.Handler) http.Handler
+
+	// Controls the behaviour of middleware chain generation when a mux
+	// is registered as an inline group inside another mux.
+	inline bool
+}
+
+// NewMux returns a newly initialized Mux object that implements the Router
+// interface.
+func NewMux() *Mux {
+	mux := &Mux{tree: &node{}, pool: &sync.Pool{}}
+	mux.pool.New = func() interface{} {
+		return NewRouteContext()
+	}
+	return mux
+}
+
+// ServeHTTP is the single method of the http.Handler interface that makes
+// Mux interoperable with the standard library. It uses a sync.Pool to get and
+// reuse routing contexts for each request.
+func (mx *Mux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	// Ensure the mux has some routes defined
+	if mx.handler == nil {
+		mx.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+
+	// Check if a routing context already exists from a parent router.
+	rctx, _ := r.Context().Value(RouteCtxKey).(*Context)
+	if rctx != nil {
+		mx.handler.ServeHTTP(w, r)
+		return
+	}
+
+	// Fetch a RouteContext object from the sync pool, and call the computed
+	// mx.handler that is comprised of mx.middlewares + mx.routeHTTP.
+	// Once the request is finished, reset the routing context and put it back
+	// into the pool for reuse from another request.
+	rctx = mx.pool.Get().(*Context)
+	rctx.Reset()
+	rctx.Routes = mx
+	rctx.parentCtx = r.Context()
+
+	// NOTE: r.WithContext() causes 2 allocations and context.WithValue() causes 1 allocation
+	r = r.WithContext(context.WithValue(r.Context(), RouteCtxKey, rctx))
+
+	// Serve the request and once it's done, put the request context back in the sync pool
+	mx.handler.ServeHTTP(w, r)
+	mx.pool.Put(rctx)
+}
+
+// Use appends a middleware handler to the Mux middleware stack.
+//
+// The middleware stack for any Mux will execute before searching for a matching
+// route to a specific handler, which provides an opportunity to respond early,
+// change the course of the request execution, or set request-scoped values for
+// the next http.Handler.
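+//
+// Middlewares execute in the order they are added; for example,
+// mx.Use(m1, m2) computes m1(m2(router)), so m1 sees every request first.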
+func (mx *Mux) Use(middlewares ...func(http.Handler) http.Handler) { + if mx.handler != nil { + panic("chi: all middlewares must be defined before routes on a mux") + } + mx.middlewares = append(mx.middlewares, middlewares...) +} + +// Handle adds the route `pattern` that matches any http method to +// execute the `handler` http.Handler. +func (mx *Mux) Handle(pattern string, handler http.Handler) { + if method, rest, found := strings.Cut(pattern, " "); found { + mx.Method(method, rest, handler) + return + } + + mx.handle(mALL, pattern, handler) +} + +// HandleFunc adds the route `pattern` that matches any http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) HandleFunc(pattern string, handlerFn http.HandlerFunc) { + if method, rest, found := strings.Cut(pattern, " "); found { + mx.Method(method, rest, handlerFn) + return + } + + mx.handle(mALL, pattern, handlerFn) +} + +// Method adds the route `pattern` that matches `method` http method to +// execute the `handler` http.Handler. +func (mx *Mux) Method(method, pattern string, handler http.Handler) { + m, ok := methodMap[strings.ToUpper(method)] + if !ok { + panic(fmt.Sprintf("chi: '%s' http method is not supported.", method)) + } + mx.handle(m, pattern, handler) +} + +// MethodFunc adds the route `pattern` that matches `method` http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) MethodFunc(method, pattern string, handlerFn http.HandlerFunc) { + mx.Method(method, pattern, handlerFn) +} + +// Connect adds the route `pattern` that matches a CONNECT http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Connect(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mCONNECT, pattern, handlerFn) +} + +// Delete adds the route `pattern` that matches a DELETE http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Delete(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mDELETE, pattern, handlerFn) +} + +// Get adds the route `pattern` that matches a GET http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Get(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mGET, pattern, handlerFn) +} + +// Head adds the route `pattern` that matches a HEAD http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Head(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mHEAD, pattern, handlerFn) +} + +// Options adds the route `pattern` that matches an OPTIONS http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Options(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mOPTIONS, pattern, handlerFn) +} + +// Patch adds the route `pattern` that matches a PATCH http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Patch(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mPATCH, pattern, handlerFn) +} + +// Post adds the route `pattern` that matches a POST http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Post(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mPOST, pattern, handlerFn) +} + +// Put adds the route `pattern` that matches a PUT http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Put(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mPUT, pattern, handlerFn) +} + +// Trace adds the route `pattern` that matches a TRACE http method to +// execute the `handlerFn` http.HandlerFunc. 
+func (mx *Mux) Trace(pattern string, handlerFn http.HandlerFunc) {
+	mx.handle(mTRACE, pattern, handlerFn)
+}
+
+// NotFound sets a custom http.HandlerFunc for routing paths that could
+// not be found. The default 404 handler is `http.NotFound`.
+func (mx *Mux) NotFound(handlerFn http.HandlerFunc) {
+	// Build NotFound handler chain
+	m := mx
+	hFn := handlerFn
+	if mx.inline && mx.parent != nil {
+		m = mx.parent
+		hFn = Chain(mx.middlewares...).HandlerFunc(hFn).ServeHTTP
+	}
+
+	// Update the notFoundHandler from this point forward
+	m.notFoundHandler = hFn
+	m.updateSubRoutes(func(subMux *Mux) {
+		if subMux.notFoundHandler == nil {
+			subMux.NotFound(hFn)
+		}
+	})
+}
+
+// MethodNotAllowed sets a custom http.HandlerFunc for routing paths where the
+// method is unresolved. The default handler returns a 405 with an empty body.
+func (mx *Mux) MethodNotAllowed(handlerFn http.HandlerFunc) {
+	// Build MethodNotAllowed handler chain
+	m := mx
+	hFn := handlerFn
+	if mx.inline && mx.parent != nil {
+		m = mx.parent
+		hFn = Chain(mx.middlewares...).HandlerFunc(hFn).ServeHTTP
+	}
+
+	// Update the methodNotAllowedHandler from this point forward
+	m.methodNotAllowedHandler = hFn
+	m.updateSubRoutes(func(subMux *Mux) {
+		if subMux.methodNotAllowedHandler == nil {
+			subMux.MethodNotAllowed(hFn)
+		}
+	})
+}
+
+// With adds inline middlewares for an endpoint handler.
+func (mx *Mux) With(middlewares ...func(http.Handler) http.Handler) Router {
+	// As in handle(), we must build the mux handler once additional
+	// middleware registration is no longer allowed for this stack, like now.
+	if !mx.inline && mx.handler == nil {
+		mx.updateRouteHandler()
+	}
+
+	// Copy middlewares from parent inline muxes
+	var mws Middlewares
+	if mx.inline {
+		mws = make(Middlewares, len(mx.middlewares))
+		copy(mws, mx.middlewares)
+	}
+	mws = append(mws, middlewares...)
+
+	im := &Mux{
+		pool: mx.pool, inline: true, parent: mx, tree: mx.tree, middlewares: mws,
+		notFoundHandler: mx.notFoundHandler, methodNotAllowedHandler: mx.methodNotAllowedHandler,
+	}
+
+	return im
+}
+
+// Group creates a new inline-Mux with a copy of the middleware stack. It's useful
+// for a group of handlers along the same routing path that use an additional
+// set of middlewares. See _examples/.
+func (mx *Mux) Group(fn func(r Router)) Router {
+	im := mx.With()
+	if fn != nil {
+		fn(im)
+	}
+	return im
+}
+
+// Route creates a new Mux and mounts it along the `pattern` as a subrouter.
+// Effectively, this is a short-hand call to Mount. See _examples/.
+func (mx *Mux) Route(pattern string, fn func(r Router)) Router {
+	if fn == nil {
+		panic(fmt.Sprintf("chi: attempting to Route() a nil subrouter on '%s'", pattern))
+	}
+	subRouter := NewRouter()
+	fn(subRouter)
+	mx.Mount(pattern, subRouter)
+	return subRouter
+}
+
+// Mount attaches another http.Handler or chi Router as a subrouter along a routing
+// path. It's very useful to split up a large API into many independent routers and
+// compose them as a single service using Mount. See _examples/.
+//
+// Note that Mount() simply sets a wildcard along the `pattern` that will continue
+// routing at the `handler`, which in most cases is another chi.Router. As a result,
+// if you define two Mount() routes on the exact same pattern the mount will panic.
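+//
+// For example, mx.Mount("/admin", adminRouter) serves both "/admin" and
+// "/admin/*" through adminRouter, which sees the routing path with the
+// "/admin" prefix already consumed.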
+func (mx *Mux) Mount(pattern string, handler http.Handler) {
+	if handler == nil {
+		panic(fmt.Sprintf("chi: attempting to Mount() a nil handler on '%s'", pattern))
+	}
+
+	// Provide runtime safety for ensuring a pattern isn't mounted on an existing
+	// routing pattern.
+	if mx.tree.findPattern(pattern+"*") || mx.tree.findPattern(pattern+"/*") {
+		panic(fmt.Sprintf("chi: attempting to Mount() a handler on an existing path, '%s'", pattern))
+	}
+
+	// Assign the sub-Router the parent's not-found & method-not-allowed handlers if not specified.
+	subr, ok := handler.(*Mux)
+	if ok && subr.notFoundHandler == nil && mx.notFoundHandler != nil {
+		subr.NotFound(mx.notFoundHandler)
+	}
+	if ok && subr.methodNotAllowedHandler == nil && mx.methodNotAllowedHandler != nil {
+		subr.MethodNotAllowed(mx.methodNotAllowedHandler)
+	}
+
+	mountHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		rctx := RouteContext(r.Context())
+
+		// shift the url path past the previous subrouter
+		rctx.RoutePath = mx.nextRoutePath(rctx)
+
+		// reset the wildcard URLParam which connects the subrouter
+		n := len(rctx.URLParams.Keys) - 1
+		if n >= 0 && rctx.URLParams.Keys[n] == "*" && len(rctx.URLParams.Values) > n {
+			rctx.URLParams.Values[n] = ""
+		}
+
+		handler.ServeHTTP(w, r)
+	})
+
+	if pattern == "" || pattern[len(pattern)-1] != '/' {
+		mx.handle(mALL|mSTUB, pattern, mountHandler)
+		mx.handle(mALL|mSTUB, pattern+"/", mountHandler)
+		pattern += "/"
+	}
+
+	method := mALL
+	subroutes, _ := handler.(Routes)
+	if subroutes != nil {
+		method |= mSTUB
+	}
+	n := mx.handle(method, pattern+"*", mountHandler)
+
+	if subroutes != nil {
+		n.subroutes = subroutes
+	}
+}
+
+// Routes returns a slice of routing information from the tree,
+// useful for traversing available routes of a router.
+func (mx *Mux) Routes() []Route {
+	return mx.tree.routes()
+}
+
+// Middlewares returns a slice of middleware handler functions.
+func (mx *Mux) Middlewares() Middlewares {
+	return mx.middlewares
+}
+
+// Match searches the routing tree for a handler that matches the method/path.
+// It's similar to routing an http request, but without executing the handler
+// thereafter.
+//
+// Note: the *Context state is updated during execution, so manage
+// the state carefully or make a NewRouteContext().
+func (mx *Mux) Match(rctx *Context, method, path string) bool {
+	return mx.Find(rctx, method, path) != ""
+}
+
+// Find searches the routing tree for the pattern that matches
+// the method/path.
+//
+// Note: the *Context state is updated during execution, so manage
+// the state carefully or make a NewRouteContext().
+func (mx *Mux) Find(rctx *Context, method, path string) string {
+	m, ok := methodMap[method]
+	if !ok {
+		return ""
+	}
+
+	node, _, _ := mx.tree.FindRoute(rctx, m, path)
+	pattern := rctx.routePattern
+
+	if node != nil {
+		if node.subroutes == nil {
+			e := node.endpoints[m]
+			return e.pattern
+		}
+
+		rctx.RoutePath = mx.nextRoutePath(rctx)
+		subPattern := node.subroutes.Find(rctx, method, rctx.RoutePath)
+		if subPattern == "" {
+			return ""
+		}
+
+		pattern = strings.TrimSuffix(pattern, "/*")
+		pattern += subPattern
+	}
+
+	return pattern
+}
+
+// NotFoundHandler returns the default Mux 404 responder whenever a route
+// cannot be found.
+func (mx *Mux) NotFoundHandler() http.HandlerFunc {
+	if mx.notFoundHandler != nil {
+		return mx.notFoundHandler
+	}
+	return http.NotFound
+}
+
+// MethodNotAllowedHandler returns the default Mux 405 responder whenever
+// a method cannot be resolved for a route.
+func (mx *Mux) MethodNotAllowedHandler(methodsAllowed ...methodTyp) http.HandlerFunc { + if mx.methodNotAllowedHandler != nil { + return mx.methodNotAllowedHandler + } + return methodNotAllowedHandler(methodsAllowed...) +} + +// handle registers a http.Handler in the routing tree for a particular http method +// and routing pattern. +func (mx *Mux) handle(method methodTyp, pattern string, handler http.Handler) *node { + if len(pattern) == 0 || pattern[0] != '/' { + panic(fmt.Sprintf("chi: routing pattern must begin with '/' in '%s'", pattern)) + } + + // Build the computed routing handler for this routing pattern. + if !mx.inline && mx.handler == nil { + mx.updateRouteHandler() + } + + // Build endpoint handler with inline middlewares for the route + var h http.Handler + if mx.inline { + mx.handler = http.HandlerFunc(mx.routeHTTP) + h = Chain(mx.middlewares...).Handler(handler) + } else { + h = handler + } + + // Add the endpoint to the tree and return the node + return mx.tree.InsertRoute(method, pattern, h) +} + +// routeHTTP routes a http.Request through the Mux routing tree to serve +// the matching handler for a particular http method. +func (mx *Mux) routeHTTP(w http.ResponseWriter, r *http.Request) { + // Grab the route context object + rctx := r.Context().Value(RouteCtxKey).(*Context) + + // The request routing path + routePath := rctx.RoutePath + if routePath == "" { + if r.URL.RawPath != "" { + routePath = r.URL.RawPath + } else { + routePath = r.URL.Path + } + if routePath == "" { + routePath = "/" + } + } + + // Check if method is supported by chi + if rctx.RouteMethod == "" { + rctx.RouteMethod = r.Method + } + method, ok := methodMap[rctx.RouteMethod] + if !ok { + mx.MethodNotAllowedHandler().ServeHTTP(w, r) + return + } + + // Find the route + if _, _, h := mx.tree.FindRoute(rctx, method, routePath); h != nil { + if supportsPathValue { + setPathValue(rctx, r) + } + + h.ServeHTTP(w, r) + return + } + if rctx.methodNotAllowed { + mx.MethodNotAllowedHandler(rctx.methodsAllowed...).ServeHTTP(w, r) + } else { + mx.NotFoundHandler().ServeHTTP(w, r) + } +} + +func (mx *Mux) nextRoutePath(rctx *Context) string { + routePath := "/" + nx := len(rctx.routeParams.Keys) - 1 // index of last param in list + if nx >= 0 && rctx.routeParams.Keys[nx] == "*" && len(rctx.routeParams.Values) > nx { + routePath = "/" + rctx.routeParams.Values[nx] + } + return routePath +} + +// Recursively update data on child routers. +func (mx *Mux) updateSubRoutes(fn func(subMux *Mux)) { + for _, r := range mx.tree.routes() { + subMux, ok := r.SubRoutes.(*Mux) + if !ok { + continue + } + fn(subMux) + } +} + +// updateRouteHandler builds the single mux handler that is a chain of the middleware +// stack, as defined by calls to Use(), and the tree router (Mux) itself. After this +// point, no other middlewares can be registered on this Mux's stack. But you can still +// compose additional middlewares via Group()'s or using a chained middleware handler. +func (mx *Mux) updateRouteHandler() { + mx.handler = chain(mx.middlewares, http.HandlerFunc(mx.routeHTTP)) +} + +// methodNotAllowedHandler is a helper function to respond with a 405, +// method not allowed. It sets the Allow header with the list of allowed +// methods for the route. 
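+// Note that each allowed method is added as its own Allow header value rather
+// than a single comma-separated list; both forms are equivalent for
+// list-valued HTTP headers.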
+func methodNotAllowedHandler(methodsAllowed ...methodTyp) func(w http.ResponseWriter, r *http.Request) {
+	return func(w http.ResponseWriter, r *http.Request) {
+		for _, m := range methodsAllowed {
+			w.Header().Add("Allow", reverseMethodMap[m])
+		}
+		w.WriteHeader(405)
+		w.Write(nil)
+	}
+}
diff --git a/vendor/github.com/go-chi/chi/v5/path_value.go b/vendor/github.com/go-chi/chi/v5/path_value.go
new file mode 100644
index 0000000000..77c840f019
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/path_value.go
@@ -0,0 +1,21 @@
+//go:build go1.22 && !tinygo
+// +build go1.22,!tinygo
+
+package chi
+
+import "net/http"
+
+// supportsPathValue is true if the Go version is 1.22 and above.
+//
+// If this is true, `net/http.Request` has methods `SetPathValue` and `PathValue`.
+const supportsPathValue = true
+
+// setPathValue sets the path values in the Request value
+// based on the provided request context.
+func setPathValue(rctx *Context, r *http.Request) {
+	for i, key := range rctx.URLParams.Keys {
+		value := rctx.URLParams.Values[i]
+		r.SetPathValue(key, value)
+	}
+}
diff --git a/vendor/github.com/go-chi/chi/v5/path_value_fallback.go b/vendor/github.com/go-chi/chi/v5/path_value_fallback.go
new file mode 100644
index 0000000000..749a8520a7
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/path_value_fallback.go
@@ -0,0 +1,19 @@
+//go:build !go1.22 || tinygo
+// +build !go1.22 tinygo
+
+package chi
+
+import "net/http"
+
+// supportsPathValue is true if the Go version is 1.22 and above.
+//
+// If this is true, `net/http.Request` has methods `SetPathValue` and `PathValue`.
+const supportsPathValue = false
+
+// setPathValue sets the path values in the Request value
+// based on the provided request context.
+//
+// setPathValue is only supported in Go 1.22 and above, so this is
+// just a blank function here so that the package compiles.
+func setPathValue(rctx *Context, r *http.Request) {
+}
diff --git a/vendor/github.com/go-chi/chi/v5/tree.go b/vendor/github.com/go-chi/chi/v5/tree.go
new file mode 100644
index 0000000000..85fcfdbb8d
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/tree.go
@@ -0,0 +1,890 @@
+package chi
+
+// Radix tree implementation below is based on the original work by
+// Armon Dadgar in https://github.com/armon/go-radix/blob/master/radix.go
+// (MIT licensed). It's been heavily modified for use as an HTTP routing tree.
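+//
+// For example, the patterns "/users", "/users/{id}" and "/users/{id}/posts"
+// share the static prefix "/users"; the tree walks that prefix once and then
+// branches on the param node, rather than matching each route in turn.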
+ +import ( + "fmt" + "net/http" + "regexp" + "sort" + "strconv" + "strings" +) + +type methodTyp uint + +const ( + mSTUB methodTyp = 1 << iota + mCONNECT + mDELETE + mGET + mHEAD + mOPTIONS + mPATCH + mPOST + mPUT + mTRACE +) + +var mALL = mCONNECT | mDELETE | mGET | mHEAD | + mOPTIONS | mPATCH | mPOST | mPUT | mTRACE + +var methodMap = map[string]methodTyp{ + http.MethodConnect: mCONNECT, + http.MethodDelete: mDELETE, + http.MethodGet: mGET, + http.MethodHead: mHEAD, + http.MethodOptions: mOPTIONS, + http.MethodPatch: mPATCH, + http.MethodPost: mPOST, + http.MethodPut: mPUT, + http.MethodTrace: mTRACE, +} + +var reverseMethodMap = map[methodTyp]string{ + mCONNECT: http.MethodConnect, + mDELETE: http.MethodDelete, + mGET: http.MethodGet, + mHEAD: http.MethodHead, + mOPTIONS: http.MethodOptions, + mPATCH: http.MethodPatch, + mPOST: http.MethodPost, + mPUT: http.MethodPut, + mTRACE: http.MethodTrace, +} + +// RegisterMethod adds support for custom HTTP method handlers, available +// via Router#Method and Router#MethodFunc +func RegisterMethod(method string) { + if method == "" { + return + } + method = strings.ToUpper(method) + if _, ok := methodMap[method]; ok { + return + } + n := len(methodMap) + if n > strconv.IntSize-2 { + panic(fmt.Sprintf("chi: max number of methods reached (%d)", strconv.IntSize)) + } + mt := methodTyp(2 << n) + methodMap[method] = mt + mALL |= mt +} + +type nodeTyp uint8 + +const ( + ntStatic nodeTyp = iota // /home + ntRegexp // /{id:[0-9]+} + ntParam // /{user} + ntCatchAll // /api/v1/* +) + +type node struct { + // subroutes on the leaf node + subroutes Routes + + // regexp matcher for regexp nodes + rex *regexp.Regexp + + // HTTP handler endpoints on the leaf node + endpoints endpoints + + // prefix is the common prefix we ignore + prefix string + + // child nodes should be stored in-order for iteration, + // in groups of the node type. + children [ntCatchAll + 1]nodes + + // first byte of the child prefix + tail byte + + // node type: static, regexp, param, catchAll + typ nodeTyp + + // first byte of the prefix + label byte +} + +// endpoints is a mapping of http method constants to handlers +// for a given route. 
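+// For example, after r.Get("/a", h) the matched node's endpoints map holds an
+// entry under mGET whose pattern is "/a" and whose handler is h.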
+type endpoints map[methodTyp]*endpoint + +type endpoint struct { + // endpoint handler + handler http.Handler + + // pattern is the routing pattern for handler nodes + pattern string + + // parameter keys recorded on handler nodes + paramKeys []string +} + +func (s endpoints) Value(method methodTyp) *endpoint { + mh, ok := s[method] + if !ok { + mh = &endpoint{} + s[method] = mh + } + return mh +} + +func (n *node) InsertRoute(method methodTyp, pattern string, handler http.Handler) *node { + var parent *node + search := pattern + + for { + // Handle key exhaustion + if len(search) == 0 { + // Insert or update the node's leaf handler + n.setEndpoint(method, handler, pattern) + return n + } + + // We're going to be searching for a wild node next, + // in this case, we need to get the tail + var label = search[0] + var segTail byte + var segEndIdx int + var segTyp nodeTyp + var segRexpat string + if label == '{' || label == '*' { + segTyp, _, segRexpat, segTail, _, segEndIdx = patNextSegment(search) + } + + var prefix string + if segTyp == ntRegexp { + prefix = segRexpat + } + + // Look for the edge to attach to + parent = n + n = n.getEdge(segTyp, label, segTail, prefix) + + // No edge, create one + if n == nil { + child := &node{label: label, tail: segTail, prefix: search} + hn := parent.addChild(child, search) + hn.setEndpoint(method, handler, pattern) + + return hn + } + + // Found an edge to match the pattern + + if n.typ > ntStatic { + // We found a param node, trim the param from the search path and continue. + // This param/wild pattern segment would already be on the tree from a previous + // call to addChild when creating a new node. + search = search[segEndIdx:] + continue + } + + // Static nodes fall below here. + // Determine longest prefix of the search key on match. + commonPrefix := longestPrefix(search, n.prefix) + if commonPrefix == len(n.prefix) { + // the common prefix is as long as the current node's prefix we're attempting to insert. + // keep the search going. + search = search[commonPrefix:] + continue + } + + // Split the node + child := &node{ + typ: ntStatic, + prefix: search[:commonPrefix], + } + parent.replaceChild(search[0], segTail, child) + + // Restore the existing node + n.label = n.prefix[commonPrefix] + n.prefix = n.prefix[commonPrefix:] + child.addChild(n, n.prefix) + + // If the new key is a subset, set the method/handler on this node and finish. + search = search[commonPrefix:] + if len(search) == 0 { + child.setEndpoint(method, handler, pattern) + return child + } + + // Create a new edge for the node + subchild := &node{ + typ: ntStatic, + label: search[0], + prefix: search, + } + hn := child.addChild(subchild, search) + hn.setEndpoint(method, handler, pattern) + return hn + } +} + +// addChild appends the new `child` node to the tree using the `pattern` as the trie key. +// For a URL router like chi's, we split the static, param, regexp and wildcard segments +// into different nodes. In addition, addChild will recursively call itself until every +// pattern segment is added to the url pattern tree as individual nodes, depending on type. +func (n *node) addChild(child *node, prefix string) *node { + search := prefix + + // handler leaf node added to the tree is the child. 
+ // this may be overridden later down the flow + hn := child + + // Parse next segment + segTyp, _, segRexpat, segTail, segStartIdx, segEndIdx := patNextSegment(search) + + // Add child depending on next up segment + switch segTyp { + + case ntStatic: + // Search prefix is all static (that is, has no params in path) + // noop + + default: + // Search prefix contains a param, regexp or wildcard + + if segTyp == ntRegexp { + rex, err := regexp.Compile(segRexpat) + if err != nil { + panic(fmt.Sprintf("chi: invalid regexp pattern '%s' in route param", segRexpat)) + } + child.prefix = segRexpat + child.rex = rex + } + + if segStartIdx == 0 { + // Route starts with a param + child.typ = segTyp + + if segTyp == ntCatchAll { + segStartIdx = -1 + } else { + segStartIdx = segEndIdx + } + if segStartIdx < 0 { + segStartIdx = len(search) + } + child.tail = segTail // for params, we set the tail + + if segStartIdx != len(search) { + // add static edge for the remaining part, split the end. + // its not possible to have adjacent param nodes, so its certainly + // going to be a static node next. + + search = search[segStartIdx:] // advance search position + + nn := &node{ + typ: ntStatic, + label: search[0], + prefix: search, + } + hn = child.addChild(nn, search) + } + + } else if segStartIdx > 0 { + // Route has some param + + // starts with a static segment + child.typ = ntStatic + child.prefix = search[:segStartIdx] + child.rex = nil + + // add the param edge node + search = search[segStartIdx:] + + nn := &node{ + typ: segTyp, + label: search[0], + tail: segTail, + } + hn = child.addChild(nn, search) + + } + } + + n.children[child.typ] = append(n.children[child.typ], child) + n.children[child.typ].Sort() + return hn +} + +func (n *node) replaceChild(label, tail byte, child *node) { + for i := 0; i < len(n.children[child.typ]); i++ { + if n.children[child.typ][i].label == label && n.children[child.typ][i].tail == tail { + n.children[child.typ][i] = child + n.children[child.typ][i].label = label + n.children[child.typ][i].tail = tail + return + } + } + panic("chi: replacing missing child") +} + +func (n *node) getEdge(ntyp nodeTyp, label, tail byte, prefix string) *node { + nds := n.children[ntyp] + for i := 0; i < len(nds); i++ { + if nds[i].label == label && nds[i].tail == tail { + if ntyp == ntRegexp && nds[i].prefix != prefix { + continue + } + return nds[i] + } + } + return nil +} + +func (n *node) setEndpoint(method methodTyp, handler http.Handler, pattern string) { + // Set the handler for the method type on the node + if n.endpoints == nil { + n.endpoints = make(endpoints) + } + + paramKeys := patParamKeys(pattern) + + if method&mSTUB == mSTUB { + n.endpoints.Value(mSTUB).handler = handler + } + if method&mALL == mALL { + h := n.endpoints.Value(mALL) + h.handler = handler + h.pattern = pattern + h.paramKeys = paramKeys + for _, m := range methodMap { + h := n.endpoints.Value(m) + h.handler = handler + h.pattern = pattern + h.paramKeys = paramKeys + } + } else { + h := n.endpoints.Value(method) + h.handler = handler + h.pattern = pattern + h.paramKeys = paramKeys + } +} + +func (n *node) FindRoute(rctx *Context, method methodTyp, path string) (*node, endpoints, http.Handler) { + // Reset the context routing pattern and params + rctx.routePattern = "" + rctx.routeParams.Keys = rctx.routeParams.Keys[:0] + rctx.routeParams.Values = rctx.routeParams.Values[:0] + + // Find the routing handlers for the path + rn := n.findRoute(rctx, method, path) + if rn == nil { + return nil, nil, nil + } + + // 
Record the routing params in the request lifecycle + rctx.URLParams.Keys = append(rctx.URLParams.Keys, rctx.routeParams.Keys...) + rctx.URLParams.Values = append(rctx.URLParams.Values, rctx.routeParams.Values...) + + // Record the routing pattern in the request lifecycle + if rn.endpoints[method].pattern != "" { + rctx.routePattern = rn.endpoints[method].pattern + rctx.RoutePatterns = append(rctx.RoutePatterns, rctx.routePattern) + } + + return rn, rn.endpoints, rn.endpoints[method].handler +} + +// Recursive edge traversal by checking all nodeTyp groups along the way. +// It's like searching through a multi-dimensional radix trie. +func (n *node) findRoute(rctx *Context, method methodTyp, path string) *node { + nn := n + search := path + + for t, nds := range nn.children { + ntyp := nodeTyp(t) + if len(nds) == 0 { + continue + } + + var xn *node + xsearch := search + + var label byte + if search != "" { + label = search[0] + } + + switch ntyp { + case ntStatic: + xn = nds.findEdge(label) + if xn == nil || !strings.HasPrefix(xsearch, xn.prefix) { + continue + } + xsearch = xsearch[len(xn.prefix):] + + case ntParam, ntRegexp: + // short-circuit and return no matching route for empty param values + if xsearch == "" { + continue + } + + // serially loop through each node grouped by the tail delimiter + for idx := 0; idx < len(nds); idx++ { + xn = nds[idx] + + // label for param nodes is the delimiter byte + p := strings.IndexByte(xsearch, xn.tail) + + if p < 0 { + if xn.tail == '/' { + p = len(xsearch) + } else { + continue + } + } else if ntyp == ntRegexp && p == 0 { + continue + } + + if ntyp == ntRegexp && xn.rex != nil { + if !xn.rex.MatchString(xsearch[:p]) { + continue + } + } else if strings.IndexByte(xsearch[:p], '/') != -1 { + // avoid a match across path segments + continue + } + + prevlen := len(rctx.routeParams.Values) + rctx.routeParams.Values = append(rctx.routeParams.Values, xsearch[:p]) + xsearch = xsearch[p:] + + if len(xsearch) == 0 { + if xn.isLeaf() { + h := xn.endpoints[method] + if h != nil && h.handler != nil { + rctx.routeParams.Keys = append(rctx.routeParams.Keys, h.paramKeys...) + return xn + } + + for endpoints := range xn.endpoints { + if endpoints == mALL || endpoints == mSTUB { + continue + } + rctx.methodsAllowed = append(rctx.methodsAllowed, endpoints) + } + + // flag that the routing context found a route, but not a corresponding + // supported method + rctx.methodNotAllowed = true + } + } + + // recursively find the next node on this branch + fin := xn.findRoute(rctx, method, xsearch) + if fin != nil { + return fin + } + + // not found on this branch, reset vars + rctx.routeParams.Values = rctx.routeParams.Values[:prevlen] + xsearch = search + } + + rctx.routeParams.Values = append(rctx.routeParams.Values, "") + + default: + // catch-all nodes + rctx.routeParams.Values = append(rctx.routeParams.Values, search) + xn = nds[0] + xsearch = "" + } + + if xn == nil { + continue + } + + // did we find it yet? + if len(xsearch) == 0 { + if xn.isLeaf() { + h := xn.endpoints[method] + if h != nil && h.handler != nil { + rctx.routeParams.Keys = append(rctx.routeParams.Keys, h.paramKeys...) + return xn + } + + for endpoints := range xn.endpoints { + if endpoints == mALL || endpoints == mSTUB { + continue + } + rctx.methodsAllowed = append(rctx.methodsAllowed, endpoints) + } + + // flag that the routing context found a route, but not a corresponding + // supported method + rctx.methodNotAllowed = true + } + } + + // recursively find the next node.. 
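+		// (depth-first search: if this subtree yields no handler, the
+		// param value captured for xn is popped just below before the
+		// loop moves on to the next child group)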
+ fin := xn.findRoute(rctx, method, xsearch) + if fin != nil { + return fin + } + + // Did not find final handler, let's remove the param here if it was set + if xn.typ > ntStatic { + if len(rctx.routeParams.Values) > 0 { + rctx.routeParams.Values = rctx.routeParams.Values[:len(rctx.routeParams.Values)-1] + } + } + + } + + return nil +} + +func (n *node) findEdge(ntyp nodeTyp, label byte) *node { + nds := n.children[ntyp] + num := len(nds) + idx := 0 + + switch ntyp { + case ntStatic, ntParam, ntRegexp: + i, j := 0, num-1 + for i <= j { + idx = i + (j-i)/2 + if label > nds[idx].label { + i = idx + 1 + } else if label < nds[idx].label { + j = idx - 1 + } else { + i = num // breaks cond + } + } + if nds[idx].label != label { + return nil + } + return nds[idx] + + default: // catch all + return nds[idx] + } +} + +func (n *node) isLeaf() bool { + return n.endpoints != nil +} + +func (n *node) findPattern(pattern string) bool { + nn := n + for _, nds := range nn.children { + if len(nds) == 0 { + continue + } + + n = nn.findEdge(nds[0].typ, pattern[0]) + if n == nil { + continue + } + + var idx int + var xpattern string + + switch n.typ { + case ntStatic: + idx = longestPrefix(pattern, n.prefix) + if idx < len(n.prefix) { + continue + } + + case ntParam, ntRegexp: + idx = strings.IndexByte(pattern, '}') + 1 + + case ntCatchAll: + idx = longestPrefix(pattern, "*") + + default: + panic("chi: unknown node type") + } + + xpattern = pattern[idx:] + if len(xpattern) == 0 { + return true + } + + return n.findPattern(xpattern) + } + return false +} + +func (n *node) routes() []Route { + rts := []Route{} + + n.walk(func(eps endpoints, subroutes Routes) bool { + if eps[mSTUB] != nil && eps[mSTUB].handler != nil && subroutes == nil { + return false + } + + // Group methodHandlers by unique patterns + pats := make(map[string]endpoints) + + for mt, h := range eps { + if h.pattern == "" { + continue + } + p, ok := pats[h.pattern] + if !ok { + p = endpoints{} + pats[h.pattern] = p + } + p[mt] = h + } + + for p, mh := range pats { + hs := make(map[string]http.Handler) + if mh[mALL] != nil && mh[mALL].handler != nil { + hs["*"] = mh[mALL].handler + } + + for mt, h := range mh { + if h.handler == nil { + continue + } + m := methodTypString(mt) + if m == "" { + continue + } + hs[m] = h.handler + } + + rt := Route{subroutes, hs, p} + rts = append(rts, rt) + } + + return false + }) + + return rts +} + +func (n *node) walk(fn func(eps endpoints, subroutes Routes) bool) bool { + // Visit the leaf values if any + if (n.endpoints != nil || n.subroutes != nil) && fn(n.endpoints, n.subroutes) { + return true + } + + // Recurse on the children + for _, ns := range n.children { + for _, cn := range ns { + if cn.walk(fn) { + return true + } + } + } + return false +} + +// patNextSegment returns the next segment details from a pattern: +// node type, param key, regexp string, param tail byte, param starting index, param ending index +func patNextSegment(pattern string) (nodeTyp, string, string, byte, int, int) { + ps := strings.Index(pattern, "{") + ws := strings.Index(pattern, "*") + + if ps < 0 && ws < 0 { + return ntStatic, "", "", 0, 0, len(pattern) // we return the entire thing + } + + // Sanity check + if ps >= 0 && ws >= 0 && ws < ps { + panic("chi: wildcard '*' must be the last pattern in a route, otherwise use a '{param}'") + } + + var tail byte = '/' // Default endpoint tail to / byte + + if ps >= 0 { + // Param/Regexp pattern is next + nt := ntParam + + // Read to closing } taking into account opens and closes in 
curl count (cc) + cc := 0 + pe := ps + for i, c := range pattern[ps:] { + if c == '{' { + cc++ + } else if c == '}' { + cc-- + if cc == 0 { + pe = ps + i + break + } + } + } + if pe == ps { + panic("chi: route param closing delimiter '}' is missing") + } + + key := pattern[ps+1 : pe] + pe++ // set end to next position + + if pe < len(pattern) { + tail = pattern[pe] + } + + key, rexpat, isRegexp := strings.Cut(key, ":") + if isRegexp { + nt = ntRegexp + } + + if len(rexpat) > 0 { + if rexpat[0] != '^' { + rexpat = "^" + rexpat + } + if rexpat[len(rexpat)-1] != '$' { + rexpat += "$" + } + } + + return nt, key, rexpat, tail, ps, pe + } + + // Wildcard pattern as finale + if ws < len(pattern)-1 { + panic("chi: wildcard '*' must be the last value in a route. trim trailing text or use a '{param}' instead") + } + return ntCatchAll, "*", "", 0, ws, len(pattern) +} + +func patParamKeys(pattern string) []string { + pat := pattern + paramKeys := []string{} + for { + ptyp, paramKey, _, _, _, e := patNextSegment(pat) + if ptyp == ntStatic { + return paramKeys + } + for i := 0; i < len(paramKeys); i++ { + if paramKeys[i] == paramKey { + panic(fmt.Sprintf("chi: routing pattern '%s' contains duplicate param key, '%s'", pattern, paramKey)) + } + } + paramKeys = append(paramKeys, paramKey) + pat = pat[e:] + } +} + +// longestPrefix finds the length of the shared prefix +// of two strings +func longestPrefix(k1, k2 string) int { + max := len(k1) + if l := len(k2); l < max { + max = l + } + var i int + for i = 0; i < max; i++ { + if k1[i] != k2[i] { + break + } + } + return i +} + +func methodTypString(method methodTyp) string { + for s, t := range methodMap { + if method == t { + return s + } + } + return "" +} + +type nodes []*node + +// Sort the list of nodes by label +func (ns nodes) Sort() { sort.Sort(ns); ns.tailSort() } +func (ns nodes) Len() int { return len(ns) } +func (ns nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] } +func (ns nodes) Less(i, j int) bool { return ns[i].label < ns[j].label } + +// tailSort pushes nodes with '/' as the tail to the end of the list for param nodes. +// The list order determines the traversal order. +func (ns nodes) tailSort() { + for i := len(ns) - 1; i >= 0; i-- { + if ns[i].typ > ntStatic && ns[i].tail == '/' { + ns.Swap(i, len(ns)-1) + return + } + } +} + +func (ns nodes) findEdge(label byte) *node { + num := len(ns) + idx := 0 + i, j := 0, num-1 + for i <= j { + idx = i + (j-i)/2 + if label > ns[idx].label { + i = idx + 1 + } else if label < ns[idx].label { + j = idx - 1 + } else { + i = num // breaks cond + } + } + if ns[idx].label != label { + return nil + } + return ns[idx] +} + +// Route describes the details of a routing handler. +// Handlers map key is an HTTP method +type Route struct { + SubRoutes Routes + Handlers map[string]http.Handler + Pattern string +} + +// WalkFunc is the type of the function called for each method and route visited by Walk. +type WalkFunc func(method string, route string, handler http.Handler, middlewares ...func(http.Handler) http.Handler) error + +// Walk walks any router tree that implements Routes interface. +func Walk(r Routes, walkFn WalkFunc) error { + return walk(r, walkFn, "") +} + +func walk(r Routes, walkFn WalkFunc, parentRoute string, parentMw ...func(http.Handler) http.Handler) error { + for _, route := range r.Routes() { + mws := make([]func(http.Handler) http.Handler, len(parentMw)) + copy(mws, parentMw) + mws = append(mws, r.Middlewares()...) 
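+		// Each recursion level copies the accumulated middleware stack
+		// before appending its own, so sibling subrouters never share a
+		// backing array and cannot clobber one another's chains.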
+ + if route.SubRoutes != nil { + if err := walk(route.SubRoutes, walkFn, parentRoute+route.Pattern, mws...); err != nil { + return err + } + continue + } + + for method, handler := range route.Handlers { + if method == "*" { + // Ignore a "catchAll" method, since we pass down all the specific methods for each route. + continue + } + + fullRoute := parentRoute + route.Pattern + fullRoute = strings.Replace(fullRoute, "/*/", "/", -1) + + if chain, ok := handler.(*ChainHandler); ok { + if err := walkFn(method, fullRoute, chain.Endpoint, append(mws, chain.Middlewares...)...); err != nil { + return err + } + } else { + if err := walkFn(method, fullRoute, handler, mws...); err != nil { + return err + } + } + } + } + + return nil +} diff --git a/vendor/github.com/go-jose/go-jose/v4/.gitignore b/vendor/github.com/go-jose/go-jose/v4/.gitignore deleted file mode 100644 index eb29ebaefd..0000000000 --- a/vendor/github.com/go-jose/go-jose/v4/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -jose-util/jose-util -jose-util.t.err \ No newline at end of file diff --git a/vendor/github.com/go-jose/go-jose/v4/.golangci.yml b/vendor/github.com/go-jose/go-jose/v4/.golangci.yml deleted file mode 100644 index 2a577a8f95..0000000000 --- a/vendor/github.com/go-jose/go-jose/v4/.golangci.yml +++ /dev/null @@ -1,53 +0,0 @@ -# https://github.com/golangci/golangci-lint - -run: - skip-files: - - doc_test.go - modules-download-mode: readonly - -linters: - enable-all: true - disable: - - gochecknoglobals - - goconst - - lll - - maligned - - nakedret - - scopelint - - unparam - - funlen # added in 1.18 (requires go-jose changes before it can be enabled) - -linters-settings: - gocyclo: - min-complexity: 35 - -issues: - exclude-rules: - - text: "don't use ALL_CAPS in Go names" - linters: - - golint - - text: "hardcoded credentials" - linters: - - gosec - - text: "weak cryptographic primitive" - linters: - - gosec - - path: json/ - linters: - - dupl - - errcheck - - gocritic - - gocyclo - - golint - - govet - - ineffassign - - staticcheck - - structcheck - - stylecheck - - unused - - path: _test\.go - linters: - - scopelint - - path: jwk.go - linters: - - gocyclo diff --git a/vendor/github.com/go-jose/go-jose/v4/.travis.yml b/vendor/github.com/go-jose/go-jose/v4/.travis.yml deleted file mode 100644 index 48de631b00..0000000000 --- a/vendor/github.com/go-jose/go-jose/v4/.travis.yml +++ /dev/null @@ -1,33 +0,0 @@ -language: go - -matrix: - fast_finish: true - allow_failures: - - go: tip - -go: - - "1.13.x" - - "1.14.x" - - tip - -before_script: - - export PATH=$HOME/.local/bin:$PATH - -before_install: - - go get -u github.com/mattn/goveralls github.com/wadey/gocovmerge - - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.18.0 - - pip install cram --user - -script: - - go test -v -covermode=count -coverprofile=profile.cov . - - go test -v -covermode=count -coverprofile=cryptosigner/profile.cov ./cryptosigner - - go test -v -covermode=count -coverprofile=cipher/profile.cov ./cipher - - go test -v -covermode=count -coverprofile=jwt/profile.cov ./jwt - - go test -v ./json # no coverage for forked encoding/json package - - golangci-lint run - - cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t # cram tests jose-util - - cd .. 
- -after_success: - - gocovmerge *.cov */*.cov > merged.coverprofile - - goveralls -coverprofile merged.coverprofile -service=travis-ci diff --git a/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md deleted file mode 100644 index 6f717dbd86..0000000000 --- a/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md +++ /dev/null @@ -1,96 +0,0 @@ -# v4.0.4 - -## Fixed - - - Reverted "Allow unmarshalling JSONWebKeySets with unsupported key types" as a - breaking change. See #136 / #137. - -# v4.0.3 - -## Changed - - - Allow unmarshalling JSONWebKeySets with unsupported key types (#130) - - Document that OpaqueKeyEncrypter can't be implemented (for now) (#129) - - Dependency updates - -# v4.0.2 - -## Changed - - - Improved documentation of Verify() to note that JSONWebKeySet is a supported - argument type (#104) - - Defined exported error values for missing x5c header and unsupported elliptic - curves error cases (#117) - -# v4.0.1 - -## Fixed - - - An attacker could send a JWE containing compressed data that used large - amounts of memory and CPU when decompressed by `Decrypt` or `DecryptMulti`. - Those functions now return an error if the decompressed data would exceed - 250kB or 10x the compressed size (whichever is larger). Thanks to - Enze Wang@Alioth and Jianjun Chen@Zhongguancun Lab (@zer0yu and @chenjj) - for reporting. - -# v4.0.0 - -This release makes some breaking changes in order to more thoroughly -address the vulnerabilities discussed in [Three New Attacks Against JSON Web -Tokens][1], "Sign/encrypt confusion", "Billion hash attack", and "Polyglot -token". - -## Changed - - - Limit JWT encryption types (exclude password or public key types) (#78) - - Enforce minimum length for HMAC keys (#85) - - jwt: match any audience in a list, rather than requiring all audiences (#81) - - jwt: accept only Compact Serialization (#75) - - jws: Add expected algorithms for signatures (#74) - - Require specifying expected algorithms for ParseEncrypted, - ParseSigned, ParseDetached, jwt.ParseEncrypted, jwt.ParseSigned, - jwt.ParseSignedAndEncrypted (#69, #74) - - Usually there is a small, known set of appropriate algorithms for a program - to use and it's a mistake to allow unexpected algorithms. For instance the - "billion hash attack" relies in part on programs accepting the PBES2 - encryption algorithm and doing the necessary work even if they weren't - specifically configured to allow PBES2. - - Revert "Strip padding off base64 strings" (#82) - - The specs require base64url encoding without padding. - - Minimum supported Go version is now 1.21 - -## Added - - - ParseSignedCompact, ParseSignedJSON, ParseEncryptedCompact, ParseEncryptedJSON. - - These allow parsing a specific serialization, as opposed to ParseSigned and - ParseEncrypted, which try to automatically detect which serialization was - provided. It's common to require a specific serialization for a specific - protocol - for instance JWT requires Compact serialization. 
- -[1]: https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf - -# v3.0.2 - -## Fixed - - - DecryptMulti: handle decompression error (#19) - -## Changed - - - jwe/CompactSerialize: improve performance (#67) - - Increase the default number of PBKDF2 iterations to 600k (#48) - - Return the proper algorithm for ECDSA keys (#45) - -## Added - - - Add Thumbprint support for opaque signers (#38) - -# v3.0.1 - -## Fixed - - - Security issue: an attacker specifying a large "p2c" value can cause - JSONWebEncryption.Decrypt and JSONWebEncryption.DecryptMulti to consume large - amounts of CPU, causing a DoS. Thanks to Matt Schwager (@mschwager) for the - disclosure and to Tom Tervoort for originally publishing the category of attack. - https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf diff --git a/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md b/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md deleted file mode 100644 index 4b4805add6..0000000000 --- a/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md +++ /dev/null @@ -1,9 +0,0 @@ -# Contributing - -If you would like to contribute code to go-jose you can do so through GitHub by -forking the repository and sending a pull request. - -When submitting code, please make every effort to follow existing conventions -and style in order to keep the code as readable as possible. Please also make -sure all tests pass by running `go test`, and format your code with `go fmt`. -We also recommend using `golint` and `errcheck`. diff --git a/vendor/github.com/go-jose/go-jose/v4/README.md b/vendor/github.com/go-jose/go-jose/v4/README.md deleted file mode 100644 index 02b5749546..0000000000 --- a/vendor/github.com/go-jose/go-jose/v4/README.md +++ /dev/null @@ -1,106 +0,0 @@ -# Go JOSE - -[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4) -[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4/jwt.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4/jwt) -[![license](https://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE) -[![test](https://img.shields.io/github/checks-status/go-jose/go-jose/v4)](https://github.com/go-jose/go-jose/actions) - -Package jose aims to provide an implementation of the Javascript Object Signing -and Encryption set of standards. This includes support for JSON Web Encryption, -JSON Web Signature, and JSON Web Token standards. - -## Overview - -The implementation follows the -[JSON Web Encryption](https://dx.doi.org/10.17487/RFC7516) (RFC 7516), -[JSON Web Signature](https://dx.doi.org/10.17487/RFC7515) (RFC 7515), and -[JSON Web Token](https://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications. -Tables of supported algorithms are shown below. The library supports both -the compact and JWS/JWE JSON Serialization formats, and has optional support for -multiple recipients. It also comes with a small command-line utility -([`jose-util`](https://pkg.go.dev/github.com/go-jose/go-jose/jose-util)) -for dealing with JOSE messages in a shell. - -**Note**: We use a forked version of the `encoding/json` package from the Go -standard library which uses case-sensitive matching for member names (instead -of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)). 
-This is to avoid differences in interpretation of messages between go-jose and -libraries in other languages. - -### Versions - -[Version 4](https://github.com/go-jose/go-jose) -([branch](https://github.com/go-jose/go-jose/tree/main), -[doc](https://pkg.go.dev/github.com/go-jose/go-jose/v4), [releases](https://github.com/go-jose/go-jose/releases)) is the current stable version: - - import "github.com/go-jose/go-jose/v4" - -The old [square/go-jose](https://github.com/square/go-jose) repo contains the prior v1 and v2 versions, which -are still useable but not actively developed anymore. - -Version 3, in this repo, is still receiving security fixes but not functionality -updates. - -### Supported algorithms - -See below for a table of supported algorithms. Algorithm identifiers match -the names in the [JSON Web Algorithms](https://dx.doi.org/10.17487/RFC7518) -standard where possible. The Godoc reference has a list of constants. - - Key encryption | Algorithm identifier(s) - :------------------------- | :------------------------------ - RSA-PKCS#1v1.5 | RSA1_5 - RSA-OAEP | RSA-OAEP, RSA-OAEP-256 - AES key wrap | A128KW, A192KW, A256KW - AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW - ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW - ECDH-ES (direct) | ECDH-ES1 - Direct encryption | dir1 - -1. Not supported in multi-recipient mode - - Signing / MAC | Algorithm identifier(s) - :------------------------- | :------------------------------ - RSASSA-PKCS#1v1.5 | RS256, RS384, RS512 - RSASSA-PSS | PS256, PS384, PS512 - HMAC | HS256, HS384, HS512 - ECDSA | ES256, ES384, ES512 - Ed25519 | EdDSA2 - -2. Only available in version 2 of the package - - Content encryption | Algorithm identifier(s) - :------------------------- | :------------------------------ - AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512 - AES-GCM | A128GCM, A192GCM, A256GCM - - Compression | Algorithm identifiers(s) - :------------------------- | ------------------------------- - DEFLATE (RFC 1951) | DEF - -### Supported key types - -See below for a table of supported key types. These are understood by the -library, and can be passed to corresponding functions such as `NewEncrypter` or -`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which -allows attaching a key id. - - Algorithm(s) | Corresponding types - :------------------------- | ------------------------------- - RSA | *[rsa.PublicKey](https://pkg.go.dev/crypto/rsa/#PublicKey), *[rsa.PrivateKey](https://pkg.go.dev/crypto/rsa/#PrivateKey) - ECDH, ECDSA | *[ecdsa.PublicKey](https://pkg.go.dev/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](https://pkg.go.dev/crypto/ecdsa/#PrivateKey) - EdDSA1 | [ed25519.PublicKey](https://pkg.go.dev/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://pkg.go.dev/crypto/ed25519#PrivateKey) - AES, HMAC | []byte - -1. Only available in version 2 or later of the package - -## Examples - -[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4) -[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4/jwt.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4/jwt) - -Examples can be found in the Godoc -reference for this package. The -[`jose-util`](https://github.com/go-jose/go-jose/tree/main/jose-util) -subdirectory also contains a small command-line utility which might be useful -as an example as well. 
diff --git a/vendor/github.com/go-jose/go-jose/v4/SECURITY.md b/vendor/github.com/go-jose/go-jose/v4/SECURITY.md deleted file mode 100644 index 2f18a75a82..0000000000 --- a/vendor/github.com/go-jose/go-jose/v4/SECURITY.md +++ /dev/null @@ -1,13 +0,0 @@ -# Security Policy -This document explains how to contact the Let's Encrypt security team to report security vulnerabilities. - -## Supported Versions -| Version | Supported | -| ------- | ----------| -| >= v3 | ✓ | -| v2 | ✗ | -| v1 | ✗ | - -## Reporting a vulnerability - -Please see [https://letsencrypt.org/contact/#security](https://letsencrypt.org/contact/#security) for the email address to report a vulnerability. Ensure that the subject line for your report contains the word `vulnerability` and is descriptive. Your email should be acknowledged within 24 hours. If you do not receive a response within 24 hours, please follow-up again with another email. diff --git a/vendor/github.com/go-jose/go-jose/v4/asymmetric.go b/vendor/github.com/go-jose/go-jose/v4/asymmetric.go deleted file mode 100644 index f8d5774ef5..0000000000 --- a/vendor/github.com/go-jose/go-jose/v4/asymmetric.go +++ /dev/null @@ -1,595 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "crypto" - "crypto/aes" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/sha256" - "errors" - "fmt" - "math/big" - - josecipher "github.com/go-jose/go-jose/v4/cipher" - "github.com/go-jose/go-jose/v4/json" -) - -// A generic RSA-based encrypter/verifier -type rsaEncrypterVerifier struct { - publicKey *rsa.PublicKey -} - -// A generic RSA-based decrypter/signer -type rsaDecrypterSigner struct { - privateKey *rsa.PrivateKey -} - -// A generic EC-based encrypter/verifier -type ecEncrypterVerifier struct { - publicKey *ecdsa.PublicKey -} - -type edEncrypterVerifier struct { - publicKey ed25519.PublicKey -} - -// A key generator for ECDH-ES -type ecKeyGenerator struct { - size int - algID string - publicKey *ecdsa.PublicKey -} - -// A generic EC-based decrypter/signer -type ecDecrypterSigner struct { - privateKey *ecdsa.PrivateKey -} - -type edDecrypterSigner struct { - privateKey ed25519.PrivateKey -} - -// newRSARecipient creates recipientKeyInfo based on the given key. -func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch keyAlg { - case RSA1_5, RSA_OAEP, RSA_OAEP_256: - default: - return recipientKeyInfo{}, ErrUnsupportedAlgorithm - } - - if publicKey == nil { - return recipientKeyInfo{}, errors.New("invalid public key") - } - - return recipientKeyInfo{ - keyAlg: keyAlg, - keyEncrypter: &rsaEncrypterVerifier{ - publicKey: publicKey, - }, - }, nil -} - -// newRSASigner creates a recipientSigInfo based on the given key. 
-func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch sigAlg { - case RS256, RS384, RS512, PS256, PS384, PS512: - default: - return recipientSigInfo{}, ErrUnsupportedAlgorithm - } - - if privateKey == nil { - return recipientSigInfo{}, errors.New("invalid private key") - } - - return recipientSigInfo{ - sigAlg: sigAlg, - publicKey: staticPublicKey(&JSONWebKey{ - Key: privateKey.Public(), - }), - signer: &rsaDecrypterSigner{ - privateKey: privateKey, - }, - }, nil -} - -func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) { - if sigAlg != EdDSA { - return recipientSigInfo{}, ErrUnsupportedAlgorithm - } - - if privateKey == nil { - return recipientSigInfo{}, errors.New("invalid private key") - } - return recipientSigInfo{ - sigAlg: sigAlg, - publicKey: staticPublicKey(&JSONWebKey{ - Key: privateKey.Public(), - }), - signer: &edDecrypterSigner{ - privateKey: privateKey, - }, - }, nil -} - -// newECDHRecipient creates recipientKeyInfo based on the given key. -func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch keyAlg { - case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: - default: - return recipientKeyInfo{}, ErrUnsupportedAlgorithm - } - - if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { - return recipientKeyInfo{}, errors.New("invalid public key") - } - - return recipientKeyInfo{ - keyAlg: keyAlg, - keyEncrypter: &ecEncrypterVerifier{ - publicKey: publicKey, - }, - }, nil -} - -// newECDSASigner creates a recipientSigInfo based on the given key. -func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch sigAlg { - case ES256, ES384, ES512: - default: - return recipientSigInfo{}, ErrUnsupportedAlgorithm - } - - if privateKey == nil { - return recipientSigInfo{}, errors.New("invalid private key") - } - - return recipientSigInfo{ - sigAlg: sigAlg, - publicKey: staticPublicKey(&JSONWebKey{ - Key: privateKey.Public(), - }), - signer: &ecDecrypterSigner{ - privateKey: privateKey, - }, - }, nil -} - -// Encrypt the given payload and update the object. -func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { - encryptedKey, err := ctx.encrypt(cek, alg) - if err != nil { - return recipientInfo{}, err - } - - return recipientInfo{ - encryptedKey: encryptedKey, - header: &rawHeader{}, - }, nil -} - -// Encrypt the given payload. Based on the key encryption algorithm, -// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). -func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) { - switch alg { - case RSA1_5: - return rsa.EncryptPKCS1v15(RandReader, ctx.publicKey, cek) - case RSA_OAEP: - return rsa.EncryptOAEP(sha1.New(), RandReader, ctx.publicKey, cek, []byte{}) - case RSA_OAEP_256: - return rsa.EncryptOAEP(sha256.New(), RandReader, ctx.publicKey, cek, []byte{}) - } - - return nil, ErrUnsupportedAlgorithm -} - -// Decrypt the given payload and return the content encryption key. 
-func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { - return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator) -} - -// Decrypt the given payload. Based on the key encryption algorithm, -// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). -func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) { - // Note: The random reader on decrypt operations is only used for blinding, - // so stubbing is meanlingless (hence the direct use of rand.Reader). - switch alg { - case RSA1_5: - defer func() { - // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload - // because of an index out of bounds error, which we want to ignore. - // This has been fixed in Go 1.3.1 (released 2014/08/13), the recover() - // only exists for preventing crashes with unpatched versions. - // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k - // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33 - _ = recover() - }() - - // Perform some input validation. - keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8 - if keyBytes != len(jek) { - // Input size is incorrect, the encrypted payload should always match - // the size of the public modulus (e.g. using a 2048 bit key will - // produce 256 bytes of output). Reject this since it's invalid input. - return nil, ErrCryptoFailure - } - - cek, _, err := generator.genKey() - if err != nil { - return nil, ErrCryptoFailure - } - - // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to - // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing - // the Million Message Attack on Cryptographic Message Syntax". We are - // therefore deliberately ignoring errors here. - _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek) - - return cek, nil - case RSA_OAEP: - // Use rand.Reader for RSA blinding - return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{}) - case RSA_OAEP_256: - // Use rand.Reader for RSA blinding - return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{}) - } - - return nil, ErrUnsupportedAlgorithm -} - -// Sign the given payload -func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { - var hash crypto.Hash - - switch alg { - case RS256, PS256: - hash = crypto.SHA256 - case RS384, PS384: - hash = crypto.SHA384 - case RS512, PS512: - hash = crypto.SHA512 - default: - return Signature{}, ErrUnsupportedAlgorithm - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - var out []byte - var err error - - switch alg { - case RS256, RS384, RS512: - // TODO(https://github.com/go-jose/go-jose/issues/40): As of go1.20, the - // random parameter is legacy and ignored, and it can be nil. 
- // https://cs.opensource.google/go/go/+/refs/tags/go1.20:src/crypto/rsa/pkcs1v15.go;l=263;bpv=0;bpt=1 - out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed) - case PS256, PS384, PS512: - out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthEqualsHash, - }) - } - - if err != nil { - return Signature{}, err - } - - return Signature{ - Signature: out, - protected: &rawHeader{}, - }, nil -} - -// Verify the given payload -func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { - var hash crypto.Hash - - switch alg { - case RS256, PS256: - hash = crypto.SHA256 - case RS384, PS384: - hash = crypto.SHA384 - case RS512, PS512: - hash = crypto.SHA512 - default: - return ErrUnsupportedAlgorithm - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - switch alg { - case RS256, RS384, RS512: - return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature) - case PS256, PS384, PS512: - return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil) - } - - return ErrUnsupportedAlgorithm -} - -// Encrypt the given payload and update the object. -func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { - switch alg { - case ECDH_ES: - // ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key. - return recipientInfo{ - header: &rawHeader{}, - }, nil - case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: - default: - return recipientInfo{}, ErrUnsupportedAlgorithm - } - - generator := ecKeyGenerator{ - algID: string(alg), - publicKey: ctx.publicKey, - } - - switch alg { - case ECDH_ES_A128KW: - generator.size = 16 - case ECDH_ES_A192KW: - generator.size = 24 - case ECDH_ES_A256KW: - generator.size = 32 - } - - kek, header, err := generator.genKey() - if err != nil { - return recipientInfo{}, err - } - - block, err := aes.NewCipher(kek) - if err != nil { - return recipientInfo{}, err - } - - jek, err := josecipher.KeyWrap(block, cek) - if err != nil { - return recipientInfo{}, err - } - - return recipientInfo{ - encryptedKey: jek, - header: &header, - }, nil -} - -// Get key size for EC key generator -func (ctx ecKeyGenerator) keySize() int { - return ctx.size -} - -// Get a content encryption key for ECDH-ES -func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) { - priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, RandReader) - if err != nil { - return nil, rawHeader{}, err - } - - out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size) - - b, err := json.Marshal(&JSONWebKey{ - Key: &priv.PublicKey, - }) - if err != nil { - return nil, nil, err - } - - headers := rawHeader{ - headerEPK: makeRawMessage(b), - } - - return out, headers, nil -} - -// Decrypt the given payload and return the content encryption key. 
-func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { - epk, err := headers.getEPK() - if err != nil { - return nil, errors.New("go-jose/go-jose: invalid epk header") - } - if epk == nil { - return nil, errors.New("go-jose/go-jose: missing epk header") - } - - publicKey, ok := epk.Key.(*ecdsa.PublicKey) - if publicKey == nil || !ok { - return nil, errors.New("go-jose/go-jose: invalid epk header") - } - - if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { - return nil, errors.New("go-jose/go-jose: invalid public key in epk header") - } - - apuData, err := headers.getAPU() - if err != nil { - return nil, errors.New("go-jose/go-jose: invalid apu header") - } - apvData, err := headers.getAPV() - if err != nil { - return nil, errors.New("go-jose/go-jose: invalid apv header") - } - - deriveKey := func(algID string, size int) []byte { - return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size) - } - - var keySize int - - algorithm := headers.getAlgorithm() - switch algorithm { - case ECDH_ES: - // ECDH-ES uses direct key agreement, no key unwrapping necessary. - return deriveKey(string(headers.getEncryption()), generator.keySize()), nil - case ECDH_ES_A128KW: - keySize = 16 - case ECDH_ES_A192KW: - keySize = 24 - case ECDH_ES_A256KW: - keySize = 32 - default: - return nil, ErrUnsupportedAlgorithm - } - - key := deriveKey(string(algorithm), keySize) - block, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - return josecipher.KeyUnwrap(block, recipient.encryptedKey) -} - -func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { - if alg != EdDSA { - return Signature{}, ErrUnsupportedAlgorithm - } - - sig, err := ctx.privateKey.Sign(RandReader, payload, crypto.Hash(0)) - if err != nil { - return Signature{}, err - } - - return Signature{ - Signature: sig, - protected: &rawHeader{}, - }, nil -} - -func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { - if alg != EdDSA { - return ErrUnsupportedAlgorithm - } - ok := ed25519.Verify(ctx.publicKey, payload, signature) - if !ok { - return errors.New("go-jose/go-jose: ed25519 signature failed to verify") - } - return nil -} - -// Sign the given payload -func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { - var expectedBitSize int - var hash crypto.Hash - - switch alg { - case ES256: - expectedBitSize = 256 - hash = crypto.SHA256 - case ES384: - expectedBitSize = 384 - hash = crypto.SHA384 - case ES512: - expectedBitSize = 521 - hash = crypto.SHA512 - } - - curveBits := ctx.privateKey.Curve.Params().BitSize - if expectedBitSize != curveBits { - return Signature{}, fmt.Errorf("go-jose/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits) - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - r, s, err := ecdsa.Sign(RandReader, ctx.privateKey, hashed) - if err != nil { - return Signature{}, err - } - - keyBytes := curveBits / 8 - if curveBits%8 > 0 { - keyBytes++ - } - - // We serialize the outputs (r and s) into big-endian byte arrays and pad - // them with zeros on the left to make sure the sizes work out. Both arrays - // must be keyBytes long, and the output must be 2*keyBytes long. 
- rBytes := r.Bytes() - rBytesPadded := make([]byte, keyBytes) - copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) - - sBytes := s.Bytes() - sBytesPadded := make([]byte, keyBytes) - copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) - - out := append(rBytesPadded, sBytesPadded...) - - return Signature{ - Signature: out, - protected: &rawHeader{}, - }, nil -} - -// Verify the given payload -func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { - var keySize int - var hash crypto.Hash - - switch alg { - case ES256: - keySize = 32 - hash = crypto.SHA256 - case ES384: - keySize = 48 - hash = crypto.SHA384 - case ES512: - keySize = 66 - hash = crypto.SHA512 - default: - return ErrUnsupportedAlgorithm - } - - if len(signature) != 2*keySize { - return fmt.Errorf("go-jose/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize) - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - r := big.NewInt(0).SetBytes(signature[:keySize]) - s := big.NewInt(0).SetBytes(signature[keySize:]) - - match := ecdsa.Verify(ctx.publicKey, hashed, r, s) - if !match { - return errors.New("go-jose/go-jose: ecdsa signature failed to verify") - } - - return nil -} diff --git a/vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go b/vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go deleted file mode 100644 index af029cec0b..0000000000 --- a/vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go +++ /dev/null @@ -1,196 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "bytes" - "crypto/cipher" - "crypto/hmac" - "crypto/sha256" - "crypto/sha512" - "crypto/subtle" - "encoding/binary" - "errors" - "hash" -) - -const ( - nonceBytes = 16 -) - -// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC. -func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) { - keySize := len(key) / 2 - integrityKey := key[:keySize] - encryptionKey := key[keySize:] - - blockCipher, err := newBlockCipher(encryptionKey) - if err != nil { - return nil, err - } - - var hash func() hash.Hash - switch keySize { - case 16: - hash = sha256.New - case 24: - hash = sha512.New384 - case 32: - hash = sha512.New - } - - return &cbcAEAD{ - hash: hash, - blockCipher: blockCipher, - authtagBytes: keySize, - integrityKey: integrityKey, - }, nil -} - -// An AEAD based on CBC+HMAC -type cbcAEAD struct { - hash func() hash.Hash - authtagBytes int - integrityKey []byte - blockCipher cipher.Block -} - -func (ctx *cbcAEAD) NonceSize() int { - return nonceBytes -} - -func (ctx *cbcAEAD) Overhead() int { - // Maximum overhead is block size (for padding) plus auth tag length, where - // the length of the auth tag is equivalent to the key size. 
- return ctx.blockCipher.BlockSize() + ctx.authtagBytes -} - -// Seal encrypts and authenticates the plaintext. -func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte { - // Output buffer -- must take care not to mangle plaintext input. - ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)] - copy(ciphertext, plaintext) - ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize()) - - cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce) - - cbc.CryptBlocks(ciphertext, ciphertext) - authtag := ctx.computeAuthTag(data, nonce, ciphertext) - - ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag))) - copy(out, ciphertext) - copy(out[len(ciphertext):], authtag) - - return ret -} - -// Open decrypts and authenticates the ciphertext. -func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) { - if len(ciphertext) < ctx.authtagBytes { - return nil, errors.New("go-jose/go-jose: invalid ciphertext (too short)") - } - - offset := len(ciphertext) - ctx.authtagBytes - expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset]) - match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:]) - if match != 1 { - return nil, errors.New("go-jose/go-jose: invalid ciphertext (auth tag mismatch)") - } - - cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce) - - // Make copy of ciphertext buffer, don't want to modify in place - buffer := append([]byte{}, ciphertext[:offset]...) - - if len(buffer)%ctx.blockCipher.BlockSize() > 0 { - return nil, errors.New("go-jose/go-jose: invalid ciphertext (invalid length)") - } - - cbc.CryptBlocks(buffer, buffer) - - // Remove padding - plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize()) - if err != nil { - return nil, err - } - - ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext))) - copy(out, plaintext) - - return ret, nil -} - -// Compute an authentication tag -func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte { - buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8) - n := 0 - n += copy(buffer, aad) - n += copy(buffer[n:], nonce) - n += copy(buffer[n:], ciphertext) - binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8) - - // According to documentation, Write() on hash.Hash never fails. - hmac := hmac.New(ctx.hash, ctx.integrityKey) - _, _ = hmac.Write(buffer) - - return hmac.Sum(nil)[:ctx.authtagBytes] -} - -// resize ensures that the given slice has a capacity of at least n bytes. -// If the capacity of the slice is less than n, a new slice is allocated -// and the existing data will be copied. 
-func resize(in []byte, n uint64) (head, tail []byte) { - if uint64(cap(in)) >= n { - head = in[:n] - } else { - head = make([]byte, n) - copy(head, in) - } - - tail = head[len(in):] - return -} - -// Apply padding -func padBuffer(buffer []byte, blockSize int) []byte { - missing := blockSize - (len(buffer) % blockSize) - ret, out := resize(buffer, uint64(len(buffer))+uint64(missing)) - padding := bytes.Repeat([]byte{byte(missing)}, missing) - copy(out, padding) - return ret -} - -// Remove padding -func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) { - if len(buffer)%blockSize != 0 { - return nil, errors.New("go-jose/go-jose: invalid padding") - } - - last := buffer[len(buffer)-1] - count := int(last) - - if count == 0 || count > blockSize || count > len(buffer) { - return nil, errors.New("go-jose/go-jose: invalid padding") - } - - padding := bytes.Repeat([]byte{last}, count) - if !bytes.HasSuffix(buffer, padding) { - return nil, errors.New("go-jose/go-jose: invalid padding") - } - - return buffer[:len(buffer)-count], nil -} diff --git a/vendor/github.com/go-jose/go-jose/v4/cipher/concat_kdf.go b/vendor/github.com/go-jose/go-jose/v4/cipher/concat_kdf.go deleted file mode 100644 index f62c3bdba5..0000000000 --- a/vendor/github.com/go-jose/go-jose/v4/cipher/concat_kdf.go +++ /dev/null @@ -1,75 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "crypto" - "encoding/binary" - "hash" - "io" -) - -type concatKDF struct { - z, info []byte - i uint32 - cache []byte - hasher hash.Hash -} - -// NewConcatKDF builds a KDF reader based on the given inputs. -func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader { - buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo))) - n := 0 - n += copy(buffer, algID) - n += copy(buffer[n:], ptyUInfo) - n += copy(buffer[n:], ptyVInfo) - n += copy(buffer[n:], supPubInfo) - copy(buffer[n:], supPrivInfo) - - hasher := hash.New() - - return &concatKDF{ - z: z, - info: buffer, - hasher: hasher, - cache: []byte{}, - i: 1, - } -} - -func (ctx *concatKDF) Read(out []byte) (int, error) { - copied := copy(out, ctx.cache) - ctx.cache = ctx.cache[copied:] - - for copied < len(out) { - ctx.hasher.Reset() - - // Write on a hash.Hash never fails - _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i) - _, _ = ctx.hasher.Write(ctx.z) - _, _ = ctx.hasher.Write(ctx.info) - - hash := ctx.hasher.Sum(nil) - chunkCopied := copy(out[copied:], hash) - copied += chunkCopied - ctx.cache = hash[chunkCopied:] - - ctx.i++ - } - - return copied, nil -} diff --git a/vendor/github.com/go-jose/go-jose/v4/cipher/ecdh_es.go b/vendor/github.com/go-jose/go-jose/v4/cipher/ecdh_es.go deleted file mode 100644 index 093c646740..0000000000 --- a/vendor/github.com/go-jose/go-jose/v4/cipher/ecdh_es.go +++ /dev/null @@ -1,86 +0,0 @@ -/*- - * Copyright 2014 Square Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "bytes" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "encoding/binary" -) - -// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA. -// It is an error to call this function with a private/public key that are not on the same -// curve. Callers must ensure that the keys are valid before calling this function. Output -// size may be at most 1<<16 bytes (64 KiB). -func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte { - if size > 1<<16 { - panic("ECDH-ES output size too large, must be less than or equal to 1<<16") - } - - // algId, partyUInfo, partyVInfo inputs must be prefixed with the length - algID := lengthPrefixed([]byte(alg)) - ptyUInfo := lengthPrefixed(apuData) - ptyVInfo := lengthPrefixed(apvData) - - // suppPubInfo is the encoded length of the output size in bits - supPubInfo := make([]byte, 4) - binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8) - - if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) { - panic("public key not on same curve as private key") - } - - z, _ := priv.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes()) - zBytes := z.Bytes() - - // Note that calling z.Bytes() on a big.Int may strip leading zero bytes from - // the returned byte array. This can lead to a problem where zBytes will be - // shorter than expected which breaks the key derivation. Therefore we must pad - // to the full length of the expected coordinate here before calling the KDF. - octSize := dSize(priv.Curve) - if len(zBytes) != octSize { - zBytes = append(bytes.Repeat([]byte{0}, octSize-len(zBytes)), zBytes...) - } - - reader := NewConcatKDF(crypto.SHA256, zBytes, algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{}) - key := make([]byte, size) - - // Read on the KDF will never fail - _, _ = reader.Read(key) - - return key -} - -// dSize returns the size in octets for a coordinate on a elliptic curve. -func dSize(curve elliptic.Curve) int { - order := curve.Params().P - bitLen := order.BitLen() - size := bitLen / 8 - if bitLen%8 != 0 { - size++ - } - return size -} - -func lengthPrefixed(data []byte) []byte { - out := make([]byte, len(data)+4) - binary.BigEndian.PutUint32(out, uint32(len(data))) - copy(out[4:], data) - return out -} diff --git a/vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go b/vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go deleted file mode 100644 index b9effbca8a..0000000000 --- a/vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go +++ /dev/null @@ -1,109 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "crypto/cipher" - "crypto/subtle" - "encoding/binary" - "errors" -) - -var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6} - -// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher. -func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) { - if len(cek)%8 != 0 { - return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks") - } - - n := len(cek) / 8 - r := make([][]byte, n) - - for i := range r { - r[i] = make([]byte, 8) - copy(r[i], cek[i*8:]) - } - - buffer := make([]byte, 16) - tBytes := make([]byte, 8) - copy(buffer, defaultIV) - - for t := 0; t < 6*n; t++ { - copy(buffer[8:], r[t%n]) - - block.Encrypt(buffer, buffer) - - binary.BigEndian.PutUint64(tBytes, uint64(t+1)) - - for i := 0; i < 8; i++ { - buffer[i] ^= tBytes[i] - } - copy(r[t%n], buffer[8:]) - } - - out := make([]byte, (n+1)*8) - copy(out, buffer[:8]) - for i := range r { - copy(out[(i+1)*8:], r[i]) - } - - return out, nil -} - -// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher. -func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) { - if len(ciphertext)%8 != 0 { - return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks") - } - - n := (len(ciphertext) / 8) - 1 - r := make([][]byte, n) - - for i := range r { - r[i] = make([]byte, 8) - copy(r[i], ciphertext[(i+1)*8:]) - } - - buffer := make([]byte, 16) - tBytes := make([]byte, 8) - copy(buffer[:8], ciphertext[:8]) - - for t := 6*n - 1; t >= 0; t-- { - binary.BigEndian.PutUint64(tBytes, uint64(t+1)) - - for i := 0; i < 8; i++ { - buffer[i] ^= tBytes[i] - } - copy(buffer[8:], r[t%n]) - - block.Decrypt(buffer, buffer) - - copy(r[t%n], buffer[8:]) - } - - if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 { - return nil, errors.New("go-jose/go-jose: failed to unwrap key") - } - - out := make([]byte, n*8) - for i := range r { - copy(out[i*8:], r[i]) - } - - return out, nil -} diff --git a/vendor/github.com/go-jose/go-jose/v4/crypter.go b/vendor/github.com/go-jose/go-jose/v4/crypter.go deleted file mode 100644 index d81b03b447..0000000000 --- a/vendor/github.com/go-jose/go-jose/v4/crypter.go +++ /dev/null @@ -1,599 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "crypto/ecdsa" - "crypto/rsa" - "errors" - "fmt" - - "github.com/go-jose/go-jose/v4/json" -) - -// Encrypter represents an encrypter which produces an encrypted JWE object. 
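
The key_wrap.go hunk above deletes go-jose's RFC 3394 (NIST AES key wrap) implementation. As a reference point for auditing the removal, here is a minimal stdlib-only sketch of the same wrapping loop; `wrap` is a hypothetical helper, not part of either codebase, and the expected output is the RFC 3394 §4.1 test vector.

```go
package main

import (
	"crypto/aes"
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

// wrap mirrors the deleted KeyWrap: 6*n rounds of AES-encrypting
// (A | R[t%n]) and folding the round counter back into A.
func wrap(kek, cek []byte) []byte {
	block, err := aes.NewCipher(kek)
	if err != nil {
		panic(err)
	}
	n := len(cek) / 8
	r := make([][]byte, n)
	for i := range r {
		r[i] = append([]byte(nil), cek[i*8:(i+1)*8]...)
	}
	buf := make([]byte, 16)
	copy(buf, []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6}) // default IV
	t := make([]byte, 8)
	for j := 0; j < 6*n; j++ {
		copy(buf[8:], r[j%n])
		block.Encrypt(buf, buf)
		binary.BigEndian.PutUint64(t, uint64(j+1))
		for i := 0; i < 8; i++ {
			buf[i] ^= t[i]
		}
		copy(r[j%n], buf[8:])
	}
	out := append([]byte(nil), buf[:8]...)
	for i := range r {
		out = append(out, r[i]...)
	}
	return out
}

func main() {
	kek, _ := hex.DecodeString("000102030405060708090a0b0c0d0e0f")
	cek, _ := hex.DecodeString("00112233445566778899aabbccddeeff")
	fmt.Println(hex.EncodeToString(wrap(kek, cek)))
	// RFC 3394 §4.1 expects: 1fa68b0a8112b447aef34bd8fb5a7b829d3e862371d2cfe5
}
```
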
-type Encrypter interface { - Encrypt(plaintext []byte) (*JSONWebEncryption, error) - EncryptWithAuthData(plaintext []byte, aad []byte) (*JSONWebEncryption, error) - Options() EncrypterOptions -} - -// A generic content cipher -type contentCipher interface { - keySize() int - encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error) - decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error) -} - -// A key generator (for generating/getting a CEK) -type keyGenerator interface { - keySize() int - genKey() ([]byte, rawHeader, error) -} - -// A generic key encrypter -type keyEncrypter interface { - encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key -} - -// A generic key decrypter -type keyDecrypter interface { - decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key -} - -// A generic encrypter based on the given key encrypter and content cipher. -type genericEncrypter struct { - contentAlg ContentEncryption - compressionAlg CompressionAlgorithm - cipher contentCipher - recipients []recipientKeyInfo - keyGenerator keyGenerator - extraHeaders map[HeaderKey]interface{} -} - -type recipientKeyInfo struct { - keyID string - keyAlg KeyAlgorithm - keyEncrypter keyEncrypter -} - -// EncrypterOptions represents options that can be set on new encrypters. -type EncrypterOptions struct { - Compression CompressionAlgorithm - - // Optional map of name/value pairs to be inserted into the protected - // header of a JWS object. Some specifications which make use of - // JWS require additional values here. - // - // Values will be serialized by [json.Marshal] and must be valid inputs to - // that function. - // - // [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal - ExtraHeaders map[HeaderKey]interface{} -} - -// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it -// if necessary, and returns the updated EncrypterOptions. -// -// The v parameter will be serialized by [json.Marshal] and must be a valid -// input to that function. -// -// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal -func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions { - if eo.ExtraHeaders == nil { - eo.ExtraHeaders = map[HeaderKey]interface{}{} - } - eo.ExtraHeaders[k] = v - return eo -} - -// WithContentType adds a content type ("cty") header and returns the updated -// EncrypterOptions. -func (eo *EncrypterOptions) WithContentType(contentType ContentType) *EncrypterOptions { - return eo.WithHeader(HeaderContentType, contentType) -} - -// WithType adds a type ("typ") header and returns the updated EncrypterOptions. -func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions { - return eo.WithHeader(HeaderType, typ) -} - -// Recipient represents an algorithm/key to encrypt messages to. -// -// PBES2Count and PBES2Salt correspond with the "p2c" and "p2s" headers used -// on the password-based encryption algorithms PBES2-HS256+A128KW, -// PBES2-HS384+A192KW, and PBES2-HS512+A256KW. If they are not provided a safe -// default of 100000 will be used for the count and a 128-bit random salt will -// be generated. -type Recipient struct { - Algorithm KeyAlgorithm - // Key must have one of these types: - // - ed25519.PublicKey - // - *ecdsa.PublicKey - // - *rsa.PublicKey - // - *JSONWebKey - // - JSONWebKey - // - []byte (a symmetric key) - // - Any type that satisfies the OpaqueKeyEncrypter interface - // - // The type of Key must match the value of Algorithm. 
- Key interface{} - KeyID string - PBES2Count int - PBES2Salt []byte -} - -// NewEncrypter creates an appropriate encrypter based on the key type -func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) { - encrypter := &genericEncrypter{ - contentAlg: enc, - recipients: []recipientKeyInfo{}, - cipher: getContentCipher(enc), - } - if opts != nil { - encrypter.compressionAlg = opts.Compression - encrypter.extraHeaders = opts.ExtraHeaders - } - - if encrypter.cipher == nil { - return nil, ErrUnsupportedAlgorithm - } - - var keyID string - var rawKey interface{} - switch encryptionKey := rcpt.Key.(type) { - case JSONWebKey: - keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key - case *JSONWebKey: - keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key - case OpaqueKeyEncrypter: - keyID, rawKey = encryptionKey.KeyID(), encryptionKey - default: - rawKey = encryptionKey - } - - switch rcpt.Algorithm { - case DIRECT: - // Direct encryption mode must be treated differently - keyBytes, ok := rawKey.([]byte) - if !ok { - return nil, ErrUnsupportedKeyType - } - if encrypter.cipher.keySize() != len(keyBytes) { - return nil, ErrInvalidKeySize - } - encrypter.keyGenerator = staticKeyGenerator{ - key: keyBytes, - } - recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, keyBytes) - recipientInfo.keyID = keyID - if rcpt.KeyID != "" { - recipientInfo.keyID = rcpt.KeyID - } - encrypter.recipients = []recipientKeyInfo{recipientInfo} - return encrypter, nil - case ECDH_ES: - // ECDH-ES (w/o key wrapping) is similar to DIRECT mode - keyDSA, ok := rawKey.(*ecdsa.PublicKey) - if !ok { - return nil, ErrUnsupportedKeyType - } - encrypter.keyGenerator = ecKeyGenerator{ - size: encrypter.cipher.keySize(), - algID: string(enc), - publicKey: keyDSA, - } - recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, keyDSA) - recipientInfo.keyID = keyID - if rcpt.KeyID != "" { - recipientInfo.keyID = rcpt.KeyID - } - encrypter.recipients = []recipientKeyInfo{recipientInfo} - return encrypter, nil - default: - // Can just add a standard recipient - encrypter.keyGenerator = randomKeyGenerator{ - size: encrypter.cipher.keySize(), - } - err := encrypter.addRecipient(rcpt) - return encrypter, err - } -} - -// NewMultiEncrypter creates a multi-encrypter based on the given parameters -func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *EncrypterOptions) (Encrypter, error) { - cipher := getContentCipher(enc) - - if cipher == nil { - return nil, ErrUnsupportedAlgorithm - } - if len(rcpts) == 0 { - return nil, fmt.Errorf("go-jose/go-jose: recipients is nil or empty") - } - - encrypter := &genericEncrypter{ - contentAlg: enc, - recipients: []recipientKeyInfo{}, - cipher: cipher, - keyGenerator: randomKeyGenerator{ - size: cipher.keySize(), - }, - } - - if opts != nil { - encrypter.compressionAlg = opts.Compression - encrypter.extraHeaders = opts.ExtraHeaders - } - - for _, recipient := range rcpts { - err := encrypter.addRecipient(recipient) - if err != nil { - return nil, err - } - } - - return encrypter, nil -} - -func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) { - var recipientInfo recipientKeyInfo - - switch recipient.Algorithm { - case DIRECT, ECDH_ES: - return fmt.Errorf("go-jose/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm) - } - - recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key) - if recipient.KeyID != "" { - recipientInfo.keyID = recipient.KeyID - } - - switch 
recipient.Algorithm { - case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW: - if sr, ok := recipientInfo.keyEncrypter.(*symmetricKeyCipher); ok { - sr.p2c = recipient.PBES2Count - sr.p2s = recipient.PBES2Salt - } - } - - if err == nil { - ctx.recipients = append(ctx.recipients, recipientInfo) - } - return err -} - -func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) { - switch encryptionKey := encryptionKey.(type) { - case *rsa.PublicKey: - return newRSARecipient(alg, encryptionKey) - case *ecdsa.PublicKey: - return newECDHRecipient(alg, encryptionKey) - case []byte: - return newSymmetricRecipient(alg, encryptionKey) - case string: - return newSymmetricRecipient(alg, []byte(encryptionKey)) - case *JSONWebKey: - recipient, err := makeJWERecipient(alg, encryptionKey.Key) - recipient.keyID = encryptionKey.KeyID - return recipient, err - case OpaqueKeyEncrypter: - return newOpaqueKeyEncrypter(alg, encryptionKey) - } - return recipientKeyInfo{}, ErrUnsupportedKeyType -} - -// newDecrypter creates an appropriate decrypter based on the key type -func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) { - switch decryptionKey := decryptionKey.(type) { - case *rsa.PrivateKey: - return &rsaDecrypterSigner{ - privateKey: decryptionKey, - }, nil - case *ecdsa.PrivateKey: - return &ecDecrypterSigner{ - privateKey: decryptionKey, - }, nil - case []byte: - return &symmetricKeyCipher{ - key: decryptionKey, - }, nil - case string: - return &symmetricKeyCipher{ - key: []byte(decryptionKey), - }, nil - case JSONWebKey: - return newDecrypter(decryptionKey.Key) - case *JSONWebKey: - return newDecrypter(decryptionKey.Key) - case OpaqueKeyDecrypter: - return &opaqueKeyDecrypter{decrypter: decryptionKey}, nil - default: - return nil, ErrUnsupportedKeyType - } -} - -// Implementation of encrypt method producing a JWE object. -func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JSONWebEncryption, error) { - return ctx.EncryptWithAuthData(plaintext, nil) -} - -// Implementation of encrypt method producing a JWE object. -func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWebEncryption, error) { - obj := &JSONWebEncryption{} - obj.aad = aad - - obj.protected = &rawHeader{} - err := obj.protected.set(headerEncryption, ctx.contentAlg) - if err != nil { - return nil, err - } - - obj.recipients = make([]recipientInfo, len(ctx.recipients)) - - if len(ctx.recipients) == 0 { - return nil, fmt.Errorf("go-jose/go-jose: no recipients to encrypt to") - } - - cek, headers, err := ctx.keyGenerator.genKey() - if err != nil { - return nil, err - } - - obj.protected.merge(&headers) - - for i, info := range ctx.recipients { - recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg) - if err != nil { - return nil, err - } - - err = recipient.header.set(headerAlgorithm, info.keyAlg) - if err != nil { - return nil, err - } - - if info.keyID != "" { - err = recipient.header.set(headerKeyID, info.keyID) - if err != nil { - return nil, err - } - } - obj.recipients[i] = recipient - } - - if len(ctx.recipients) == 1 { - // Move per-recipient headers into main protected header if there's - // only a single recipient. 
- obj.protected.merge(obj.recipients[0].header) - obj.recipients[0].header = nil - } - - if ctx.compressionAlg != NONE { - plaintext, err = compress(ctx.compressionAlg, plaintext) - if err != nil { - return nil, err - } - - err = obj.protected.set(headerCompression, ctx.compressionAlg) - if err != nil { - return nil, err - } - } - - for k, v := range ctx.extraHeaders { - b, err := json.Marshal(v) - if err != nil { - return nil, err - } - (*obj.protected)[k] = makeRawMessage(b) - } - - authData := obj.computeAuthData() - parts, err := ctx.cipher.encrypt(cek, authData, plaintext) - if err != nil { - return nil, err - } - - obj.iv = parts.iv - obj.ciphertext = parts.ciphertext - obj.tag = parts.tag - - return obj, nil -} - -func (ctx *genericEncrypter) Options() EncrypterOptions { - return EncrypterOptions{ - Compression: ctx.compressionAlg, - ExtraHeaders: ctx.extraHeaders, - } -} - -// Decrypt and validate the object and return the plaintext. This -// function does not support multi-recipient. If you desire multi-recipient -// decryption use DecryptMulti instead. -// -// The decryptionKey argument must contain a private or symmetric key -// and must have one of these types: -// - *ecdsa.PrivateKey -// - *rsa.PrivateKey -// - *JSONWebKey -// - JSONWebKey -// - *JSONWebKeySet -// - JSONWebKeySet -// - []byte (a symmetric key) -// - string (a symmetric key) -// - Any type that satisfies the OpaqueKeyDecrypter interface. -// -// Note that ed25519 is only available for signatures, not encryption, so is -// not an option here. -// -// Automatically decompresses plaintext, but returns an error if the decompressed -// data would be >250kB or >10x the size of the compressed data, whichever is larger. -func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { - headers := obj.mergedHeaders(nil) - - if len(obj.recipients) > 1 { - return nil, errors.New("go-jose/go-jose: too many recipients in payload; expecting only one") - } - - critical, err := headers.getCritical() - if err != nil { - return nil, fmt.Errorf("go-jose/go-jose: invalid crit header") - } - - if len(critical) > 0 { - return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") - } - - key, err := tryJWKS(decryptionKey, obj.Header) - if err != nil { - return nil, err - } - decrypter, err := newDecrypter(key) - if err != nil { - return nil, err - } - - cipher := getContentCipher(headers.getEncryption()) - if cipher == nil { - return nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(headers.getEncryption())) - } - - generator := randomKeyGenerator{ - size: cipher.keySize(), - } - - parts := &aeadParts{ - iv: obj.iv, - ciphertext: obj.ciphertext, - tag: obj.tag, - } - - authData := obj.computeAuthData() - - var plaintext []byte - recipient := obj.recipients[0] - recipientHeaders := obj.mergedHeaders(&recipient) - - cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) - if err == nil { - // Found a valid CEK -- let's try to decrypt. - plaintext, err = cipher.decrypt(cek, authData, parts) - } - - if plaintext == nil { - return nil, ErrCryptoFailure - } - - // The "zip" header parameter may only be present in the protected header. 
- if comp := obj.protected.getCompression(); comp != "" { - plaintext, err = decompress(comp, plaintext) - if err != nil { - return nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err) - } - } - - return plaintext, nil -} - -// DecryptMulti decrypts and validates the object and returns the plaintexts, -// with support for multiple recipients. It returns the index of the recipient -// for which the decryption was successful, the merged headers for that recipient, -// and the plaintext. -// -// The decryptionKey argument must have one of the types allowed for the -// decryptionKey argument of Decrypt(). -// -// Automatically decompresses plaintext, but returns an error if the decompressed -// data would be >250kB or >3x the size of the compressed data, whichever is larger. -func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) { - globalHeaders := obj.mergedHeaders(nil) - - critical, err := globalHeaders.getCritical() - if err != nil { - return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: invalid crit header") - } - - if len(critical) > 0 { - return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") - } - - key, err := tryJWKS(decryptionKey, obj.Header) - if err != nil { - return -1, Header{}, nil, err - } - decrypter, err := newDecrypter(key) - if err != nil { - return -1, Header{}, nil, err - } - - encryption := globalHeaders.getEncryption() - cipher := getContentCipher(encryption) - if cipher == nil { - return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(encryption)) - } - - generator := randomKeyGenerator{ - size: cipher.keySize(), - } - - parts := &aeadParts{ - iv: obj.iv, - ciphertext: obj.ciphertext, - tag: obj.tag, - } - - authData := obj.computeAuthData() - - index := -1 - var plaintext []byte - var headers rawHeader - - for i, recipient := range obj.recipients { - recipientHeaders := obj.mergedHeaders(&recipient) - - cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) - if err == nil { - // Found a valid CEK -- let's try to decrypt. - plaintext, err = cipher.decrypt(cek, authData, parts) - if err == nil { - index = i - headers = recipientHeaders - break - } - } - } - - if plaintext == nil { - return -1, Header{}, nil, ErrCryptoFailure - } - - // The "zip" header parameter may only be present in the protected header. - if comp := obj.protected.getCompression(); comp != "" { - plaintext, err = decompress(comp, plaintext) - if err != nil { - return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err) - } - } - - sanitized, err := headers.sanitized() - if err != nil { - return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to sanitize header: %v", err) - } - - return index, sanitized, plaintext, err -} diff --git a/vendor/github.com/go-jose/go-jose/v4/encoding.go b/vendor/github.com/go-jose/go-jose/v4/encoding.go deleted file mode 100644 index 4f6e0d4a5c..0000000000 --- a/vendor/github.com/go-jose/go-jose/v4/encoding.go +++ /dev/null @@ -1,228 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "bytes" - "compress/flate" - "encoding/base64" - "encoding/binary" - "fmt" - "io" - "math/big" - "strings" - "unicode" - - "github.com/go-jose/go-jose/v4/json" -) - -// Helper function to serialize known-good objects. -// Precondition: value is not a nil pointer. -func mustSerializeJSON(value interface{}) []byte { - out, err := json.Marshal(value) - if err != nil { - panic(err) - } - // We never want to serialize the top-level value "null," since it's not a - // valid JOSE message. But if a caller passes in a nil pointer to this method, - // MarshalJSON will happily serialize it as the top-level value "null". If - // that value is then embedded in another operation, for instance by being - // base64-encoded and fed as input to a signing algorithm - // (https://github.com/go-jose/go-jose/issues/22), the result will be - // incorrect. Because this method is intended for known-good objects, and a nil - // pointer is not a known-good object, we are free to panic in this case. - // Note: It's not possible to directly check whether the data pointed at by an - // interface is a nil pointer, so we do this hacky workaround. - // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I - if string(out) == "null" { - panic("Tried to serialize a nil pointer.") - } - return out -} - -// Strip all newlines and whitespace -func stripWhitespace(data string) string { - buf := strings.Builder{} - buf.Grow(len(data)) - for _, r := range data { - if !unicode.IsSpace(r) { - buf.WriteRune(r) - } - } - return buf.String() -} - -// Perform compression based on algorithm -func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { - switch algorithm { - case DEFLATE: - return deflate(input) - default: - return nil, ErrUnsupportedAlgorithm - } -} - -// Perform decompression based on algorithm -func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { - switch algorithm { - case DEFLATE: - return inflate(input) - default: - return nil, ErrUnsupportedAlgorithm - } -} - -// deflate compresses the input. -func deflate(input []byte) ([]byte, error) { - output := new(bytes.Buffer) - - // Writing to byte buffer, err is always nil - writer, _ := flate.NewWriter(output, 1) - _, _ = io.Copy(writer, bytes.NewBuffer(input)) - - err := writer.Close() - return output.Bytes(), err -} - -// inflate decompresses the input. -// -// Errors if the decompressed data would be >250kB or >10x the size of the -// compressed data, whichever is larger. -func inflate(input []byte) ([]byte, error) { - output := new(bytes.Buffer) - reader := flate.NewReader(bytes.NewBuffer(input)) - - maxCompressedSize := max(250_000, 10*int64(len(input))) - - limit := maxCompressedSize + 1 - n, err := io.CopyN(output, reader, limit) - if err != nil && err != io.EOF { - return nil, err - } - if n == limit { - return nil, fmt.Errorf("uncompressed data would be too large (>%d bytes)", maxCompressedSize) - } - - err = reader.Close() - return output.Bytes(), err -} - -// byteBuffer represents a slice of bytes that can be serialized to url-safe base64. 
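
The deleted `inflate` above is go-jose's guard against decompression bombs: output is capped at the larger of 250 kB or 10× the compressed input size. A stdlib-only sketch of the same pattern, with a hypothetical `inflateCapped` helper:

```go
package main

import (
	"bytes"
	"compress/flate"
	"fmt"
	"io"
)

// inflateCapped decompresses input but refuses to produce more than
// max(250 kB, 10x the compressed size), mirroring the deleted inflate.
func inflateCapped(input []byte) ([]byte, error) {
	out := new(bytes.Buffer)
	r := flate.NewReader(bytes.NewReader(input))
	maxSize := max(250_000, 10*int64(len(input)))
	limit := maxSize + 1 // copy one extra byte so an overflow is detectable
	n, err := io.CopyN(out, r, limit)
	if err != nil && err != io.EOF {
		return nil, err
	}
	if n == limit {
		return nil, fmt.Errorf("uncompressed data would be too large (>%d bytes)", maxSize)
	}
	return out.Bytes(), r.Close()
}

func main() {
	var compressed bytes.Buffer
	w, _ := flate.NewWriter(&compressed, flate.BestSpeed)
	_, _ = w.Write(bytes.Repeat([]byte("a"), 1<<20)) // 1 MiB of 'a' compresses to ~kB
	_ = w.Close()

	_, err := inflateCapped(compressed.Bytes())
	fmt.Println(err) // rejected: 1 MiB of output from a tiny input exceeds the cap
}
```
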
-type byteBuffer struct { - data []byte -} - -func newBuffer(data []byte) *byteBuffer { - if data == nil { - return nil - } - return &byteBuffer{ - data: data, - } -} - -func newFixedSizeBuffer(data []byte, length int) *byteBuffer { - if len(data) > length { - panic("go-jose/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)") - } - pad := make([]byte, length-len(data)) - return newBuffer(append(pad, data...)) -} - -func newBufferFromInt(num uint64) *byteBuffer { - data := make([]byte, 8) - binary.BigEndian.PutUint64(data, num) - return newBuffer(bytes.TrimLeft(data, "\x00")) -} - -func (b *byteBuffer) MarshalJSON() ([]byte, error) { - return json.Marshal(b.base64()) -} - -func (b *byteBuffer) UnmarshalJSON(data []byte) error { - var encoded string - err := json.Unmarshal(data, &encoded) - if err != nil { - return err - } - - if encoded == "" { - return nil - } - - decoded, err := base64.RawURLEncoding.DecodeString(encoded) - if err != nil { - return err - } - - *b = *newBuffer(decoded) - - return nil -} - -func (b *byteBuffer) base64() string { - return base64.RawURLEncoding.EncodeToString(b.data) -} - -func (b *byteBuffer) bytes() []byte { - // Handling nil here allows us to transparently handle nil slices when serializing. - if b == nil { - return nil - } - return b.data -} - -func (b byteBuffer) bigInt() *big.Int { - return new(big.Int).SetBytes(b.data) -} - -func (b byteBuffer) toInt() int { - return int(b.bigInt().Int64()) -} - -func base64EncodeLen(sl []byte) int { - return base64.RawURLEncoding.EncodedLen(len(sl)) -} - -func base64JoinWithDots(inputs ...[]byte) string { - if len(inputs) == 0 { - return "" - } - - // Count of dots. - totalCount := len(inputs) - 1 - - for _, input := range inputs { - totalCount += base64EncodeLen(input) - } - - out := make([]byte, totalCount) - startEncode := 0 - for i, input := range inputs { - base64.RawURLEncoding.Encode(out[startEncode:], input) - - if i == len(inputs)-1 { - continue - } - - startEncode += base64EncodeLen(input) - out[startEncode] = '.' - startEncode++ - } - - return string(out) -} diff --git a/vendor/github.com/go-jose/go-jose/v4/json/LICENSE b/vendor/github.com/go-jose/go-jose/v4/json/LICENSE deleted file mode 100644 index 7448756763..0000000000 --- a/vendor/github.com/go-jose/go-jose/v4/json/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/go-jose/go-jose/v4/json/README.md b/vendor/github.com/go-jose/go-jose/v4/json/README.md deleted file mode 100644 index 86de5e5581..0000000000 --- a/vendor/github.com/go-jose/go-jose/v4/json/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Safe JSON - -This repository contains a fork of the `encoding/json` package from Go 1.6. - -The following changes were made: - -* Object deserialization uses case-sensitive member name matching instead of - [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html). - This is to avoid differences in the interpretation of JOSE messages between - go-jose and libraries written in other languages. -* When deserializing a JSON object, we check for duplicate keys and reject the - input whenever we detect a duplicate. Rather than trying to work with malformed - data, we prefer to reject it right away. diff --git a/vendor/github.com/go-jose/go-jose/v4/json/decode.go b/vendor/github.com/go-jose/go-jose/v4/json/decode.go deleted file mode 100644 index 50634dd847..0000000000 --- a/vendor/github.com/go-jose/go-jose/v4/json/decode.go +++ /dev/null @@ -1,1216 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Represents JSON data structure using native Go types: booleans, floats, -// strings, arrays, and maps. - -package json - -import ( - "bytes" - "encoding" - "encoding/base64" - "errors" - "fmt" - "math" - "reflect" - "runtime" - "strconv" - "unicode" - "unicode/utf16" - "unicode/utf8" -) - -// Unmarshal parses the JSON-encoded data and stores the result -// in the value pointed to by v. -// -// Unmarshal uses the inverse of the encodings that -// Marshal uses, allocating maps, slices, and pointers as necessary, -// with the following additional rules: -// -// To unmarshal JSON into a pointer, Unmarshal first handles the case of -// the JSON being the JSON literal null. In that case, Unmarshal sets -// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into -// the value pointed at by the pointer. If the pointer is nil, Unmarshal -// allocates a new value for it to point to. -// -// To unmarshal JSON into a struct, Unmarshal matches incoming object -// keys to the keys used by Marshal (either the struct field name or its tag), -// preferring an exact match but also accepting a case-insensitive match. -// Unmarshal will only set exported fields of the struct. -// -// To unmarshal JSON into an interface value, -// Unmarshal stores one of these in the interface value: -// -// bool, for JSON booleans -// float64, for JSON numbers -// string, for JSON strings -// []interface{}, for JSON arrays -// map[string]interface{}, for JSON objects -// nil for JSON null -// -// To unmarshal a JSON array into a slice, Unmarshal resets the slice length -// to zero and then appends each element to the slice. 
-// As a special case, to unmarshal an empty JSON array into a slice, -// Unmarshal replaces the slice with a new empty slice. -// -// To unmarshal a JSON array into a Go array, Unmarshal decodes -// JSON array elements into corresponding Go array elements. -// If the Go array is smaller than the JSON array, -// the additional JSON array elements are discarded. -// If the JSON array is smaller than the Go array, -// the additional Go array elements are set to zero values. -// -// To unmarshal a JSON object into a string-keyed map, Unmarshal first -// establishes a map to use, If the map is nil, Unmarshal allocates a new map. -// Otherwise Unmarshal reuses the existing map, keeping existing entries. -// Unmarshal then stores key-value pairs from the JSON object into the map. -// -// If a JSON value is not appropriate for a given target type, -// or if a JSON number overflows the target type, Unmarshal -// skips that field and completes the unmarshaling as best it can. -// If no more serious errors are encountered, Unmarshal returns -// an UnmarshalTypeError describing the earliest such error. -// -// The JSON null value unmarshals into an interface, map, pointer, or slice -// by setting that Go value to nil. Because null is often used in JSON to mean -// “not present,” unmarshaling a JSON null into any other Go type has no effect -// on the value and produces no error. -// -// When unmarshaling quoted strings, invalid UTF-8 or -// invalid UTF-16 surrogate pairs are not treated as an error. -// Instead, they are replaced by the Unicode replacement -// character U+FFFD. -func Unmarshal(data []byte, v interface{}) error { - // Check for well-formedness. - // Avoids filling out half a data structure - // before discovering a JSON syntax error. - var d decodeState - err := checkValid(data, &d.scan) - if err != nil { - return err - } - - d.init(data) - return d.unmarshal(v) -} - -// Unmarshaler is the interface implemented by objects -// that can unmarshal a JSON description of themselves. -// The input can be assumed to be a valid encoding of -// a JSON value. UnmarshalJSON must copy the JSON data -// if it wishes to retain the data after returning. -type Unmarshaler interface { - UnmarshalJSON([]byte) error -} - -// An UnmarshalTypeError describes a JSON value that was -// not appropriate for a value of a specific Go type. -type UnmarshalTypeError struct { - Value string // description of JSON value - "bool", "array", "number -5" - Type reflect.Type // type of Go value it could not be assigned to - Offset int64 // error occurred after reading Offset bytes -} - -func (e *UnmarshalTypeError) Error() string { - return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() -} - -// An UnmarshalFieldError describes a JSON object key that -// led to an unexported (and therefore unwritable) struct field. -// (No longer used; kept for compatibility.) -type UnmarshalFieldError struct { - Key string - Type reflect.Type - Field reflect.StructField -} - -func (e *UnmarshalFieldError) Error() string { - return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() -} - -// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. -// (The argument to Unmarshal must be a non-nil pointer.) 
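
The forked decoder being deleted here exists because, per its README above, stdlib `encoding/json` matches field names case-insensitively and silently lets a later duplicate key win — both risky for JOSE messages. A quick stdlib demonstration of the two behaviors the fork rejects:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Standard library: the later duplicate silently wins...
	var m map[string]int
	_ = json.Unmarshal([]byte(`{"alg":1,"alg":2}`), &m)
	fmt.Println(m["alg"]) // 2

	// ...and field matching is case-insensitive.
	var v struct{ Alg string }
	_ = json.Unmarshal([]byte(`{"ALG":"none"}`), &v)
	fmt.Println(v.Alg) // "none"

	// The forked decoder above instead fails with
	// "json: duplicate key 'alg' in object" and requires exact-case keys.
}
```
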
-type InvalidUnmarshalError struct { - Type reflect.Type -} - -func (e *InvalidUnmarshalError) Error() string { - if e.Type == nil { - return "json: Unmarshal(nil)" - } - - if e.Type.Kind() != reflect.Ptr { - return "json: Unmarshal(non-pointer " + e.Type.String() + ")" - } - return "json: Unmarshal(nil " + e.Type.String() + ")" -} - -func (d *decodeState) unmarshal(v interface{}) (err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = r.(error) - } - }() - - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr || rv.IsNil() { - return &InvalidUnmarshalError{reflect.TypeOf(v)} - } - - d.scan.reset() - // We decode rv not rv.Elem because the Unmarshaler interface - // test must be applied at the top level of the value. - d.value(rv) - return d.savedError -} - -// A Number represents a JSON number literal. -type Number string - -// String returns the literal text of the number. -func (n Number) String() string { return string(n) } - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return strconv.ParseFloat(string(n), 64) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return strconv.ParseInt(string(n), 10, 64) -} - -// isValidNumber reports whether s is a valid JSON number literal. -func isValidNumber(s string) bool { - // This function implements the JSON numbers grammar. - // See https://tools.ietf.org/html/rfc7159#section-6 - // and http://json.org/number.gif - - if s == "" { - return false - } - - // Optional - - if s[0] == '-' { - s = s[1:] - if s == "" { - return false - } - } - - // Digits - switch { - default: - return false - - case s[0] == '0': - s = s[1:] - - case '1' <= s[0] && s[0] <= '9': - s = s[1:] - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // . followed by 1 or more digits. - if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { - s = s[2:] - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // e or E followed by an optional - or + and - // 1 or more digits. - if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { - s = s[1:] - if s[0] == '+' || s[0] == '-' { - s = s[1:] - if s == "" { - return false - } - } - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // Make sure we are at the end. - return s == "" -} - -type NumberUnmarshalType int - -const ( - // unmarshal a JSON number into an interface{} as a float64 - UnmarshalFloat NumberUnmarshalType = iota - // unmarshal a JSON number into an interface{} as a `json.Number` - UnmarshalJSONNumber - // unmarshal a JSON number into an interface{} as a int64 - // if value is an integer otherwise float64 - UnmarshalIntOrFloat -) - -// decodeState represents the state while decoding a JSON value. -type decodeState struct { - data []byte - off int // read offset in data - scan scanner - nextscan scanner // for calls to nextValue - savedError error - numberType NumberUnmarshalType -} - -// errPhase is used for errors that should not happen unless -// there is a bug in the JSON decoder or something is editing -// the data slice while the decoder executes. -var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") - -func (d *decodeState) init(data []byte) *decodeState { - d.data = data - d.off = 0 - d.savedError = nil - return d -} - -// error aborts the decoding by panicking with err. 
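
`UnmarshalIntOrFloat` above is a fork extension with no stdlib equivalent; `convertNumber`, later in this hunk, implements it. Roughly, as a hypothetical standalone helper:

```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

// intOrFloat mimics the fork's UnmarshalIntOrFloat mode: integral JSON
// numbers come back as int64, everything else as float64.
func intOrFloat(s string) (interface{}, error) {
	if v, err := strconv.ParseInt(s, 10, 64); err == nil {
		return v, nil
	}
	// Fall back to float parsing (also catches integers in scientific notation).
	f, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return nil, err
	}
	if fi, frac := math.Modf(f); frac == 0.0 {
		return int64(fi), nil
	}
	return f, nil
}

func main() {
	for _, s := range []string{"42", "1e3", "3.14"} {
		v, _ := intOrFloat(s)
		fmt.Printf("%s -> %T %v\n", s, v, v)
	}
	// 42 -> int64 42; 1e3 -> int64 1000; 3.14 -> float64 3.14
}
```
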
-func (d *decodeState) error(err error) { - panic(err) -} - -// saveError saves the first err it is called with, -// for reporting at the end of the unmarshal. -func (d *decodeState) saveError(err error) { - if d.savedError == nil { - d.savedError = err - } -} - -// next cuts off and returns the next full JSON value in d.data[d.off:]. -// The next value is known to be an object or array, not a literal. -func (d *decodeState) next() []byte { - c := d.data[d.off] - item, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // Our scanner has seen the opening brace/bracket - // and thinks we're still in the middle of the object. - // invent a closing brace/bracket to get it out. - if c == '{' { - d.scan.step(&d.scan, '}') - } else { - d.scan.step(&d.scan, ']') - } - - return item -} - -// scanWhile processes bytes in d.data[d.off:] until it -// receives a scan code not equal to op. -// It updates d.off and returns the new scan code. -func (d *decodeState) scanWhile(op int) int { - var newOp int - for { - if d.off >= len(d.data) { - newOp = d.scan.eof() - d.off = len(d.data) + 1 // mark processed EOF with len+1 - } else { - c := d.data[d.off] - d.off++ - newOp = d.scan.step(&d.scan, c) - } - if newOp != op { - break - } - } - return newOp -} - -// value decodes a JSON value from d.data[d.off:] into the value. -// it updates d.off to point past the decoded value. -func (d *decodeState) value(v reflect.Value) { - if !v.IsValid() { - _, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // d.scan thinks we're still at the beginning of the item. - // Feed in an empty string - the shortest, simplest value - - // so that it knows we got to the end of the value. - if d.scan.redo { - // rewind. - d.scan.redo = false - d.scan.step = stateBeginValue - } - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - - n := len(d.scan.parseState) - if n > 0 && d.scan.parseState[n-1] == parseObjectKey { - // d.scan thinks we just read an object key; finish the object - d.scan.step(&d.scan, ':') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '}') - } - - return - } - - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(v) - - case scanBeginObject: - d.object(v) - - case scanBeginLiteral: - d.literal(v) - } -} - -type unquotedValue struct{} - -// valueQuoted is like value but decodes a -// quoted string literal or literal null into an interface value. -// If it finds anything other than a quoted string literal or null, -// valueQuoted returns unquotedValue{}. -func (d *decodeState) valueQuoted() interface{} { - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(reflect.Value{}) - - case scanBeginObject: - d.object(reflect.Value{}) - - case scanBeginLiteral: - switch v := d.literalInterface().(type) { - case nil, string: - return v - } - } - return unquotedValue{} -} - -// indirect walks down v allocating pointers as needed, -// until it gets to a non-pointer. -// if it encounters an Unmarshaler, indirect stops and returns that. -// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. 
-func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(Unmarshaler); ok { - return u, nil, reflect.Value{} - } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - return nil, u, reflect.Value{} - } - } - v = v.Elem() - } - return nil, nil, v -} - -// array consumes an array from d.data[d.off-1:], decoding into the value v. -// the first byte of the array ('[') has been read already. -func (d *decodeState) array(v reflect.Value) { - // Check for unmarshaler. - u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - } - - v = pv - - // Check type of target. - switch v.Kind() { - case reflect.Interface: - if v.NumMethod() == 0 { - // Decoding into nil interface? Switch to non-reflect code. - v.Set(reflect.ValueOf(d.arrayInterface())) - return - } - // Otherwise it's invalid. - fallthrough - default: - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - case reflect.Array: - case reflect.Slice: - break - } - - i := 0 - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - // Get element of array, growing if necessary. - if v.Kind() == reflect.Slice { - // Grow slice if necessary - if i >= v.Cap() { - newcap := v.Cap() + v.Cap()/2 - if newcap < 4 { - newcap = 4 - } - newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) - reflect.Copy(newv, v) - v.Set(newv) - } - if i >= v.Len() { - v.SetLen(i + 1) - } - } - - if i < v.Len() { - // Decode into element. - d.value(v.Index(i)) - } else { - // Ran out of fixed array: skip. - d.value(reflect.Value{}) - } - i++ - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - - if i < v.Len() { - if v.Kind() == reflect.Array { - // Array. Zero the rest. - z := reflect.Zero(v.Type().Elem()) - for ; i < v.Len(); i++ { - v.Index(i).Set(z) - } - } else { - v.SetLen(i) - } - } - if i == 0 && v.Kind() == reflect.Slice { - v.Set(reflect.MakeSlice(v.Type(), 0, 0)) - } -} - -var nullLiteral = []byte("null") - -// object consumes an object from d.data[d.off-1:], decoding into the value v. -// the first byte ('{') of the object has been read already. -func (d *decodeState) object(v reflect.Value) { - // Check for unmarshaler. 
- u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - v = pv - - // Decoding into nil interface? Switch to non-reflect code. - if v.Kind() == reflect.Interface && v.NumMethod() == 0 { - v.Set(reflect.ValueOf(d.objectInterface())) - return - } - - // Check type of target: struct or map[string]T - switch v.Kind() { - case reflect.Map: - // map must have string kind - t := v.Type() - if t.Key().Kind() != reflect.String { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - if v.IsNil() { - v.Set(reflect.MakeMap(t)) - } - case reflect.Struct: - - default: - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - - var mapElem reflect.Value - keys := map[string]bool{} - - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read key. - start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - key, ok := unquote(item) - if !ok { - d.error(errPhase) - } - - // Check for duplicate keys. - _, ok = keys[key] - if !ok { - keys[key] = true - } else { - d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) - } - - // Figure out field corresponding to key. - var subv reflect.Value - destring := false // whether the value is wrapped in a string to be decoded first - - if v.Kind() == reflect.Map { - elemType := v.Type().Elem() - if !mapElem.IsValid() { - mapElem = reflect.New(elemType).Elem() - } else { - mapElem.Set(reflect.Zero(elemType)) - } - subv = mapElem - } else { - var f *field - fields := cachedTypeFields(v.Type()) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, []byte(key)) { - f = ff - break - } - } - if f != nil { - subv = v - destring = f.quoted - for _, i := range f.index { - if subv.Kind() == reflect.Ptr { - if subv.IsNil() { - subv.Set(reflect.New(subv.Type().Elem())) - } - subv = subv.Elem() - } - subv = subv.Field(i) - } - } - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - if destring { - switch qv := d.valueQuoted().(type) { - case nil: - d.literalStore(nullLiteral, subv, false) - case string: - d.literalStore([]byte(qv), subv, true) - default: - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) - } - } else { - d.value(subv) - } - - // Write value back to map; - // if using struct, subv points into struct already. - if v.Kind() == reflect.Map { - kv := reflect.ValueOf(key).Convert(v.Type().Key()) - v.SetMapIndex(kv, subv) - } - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } -} - -// literal consumes a literal from d.data[d.off-1:], decoding into the value v. -// The first byte of the literal has been read already -// (that's how the caller knows it's a literal). 
-func (d *decodeState) literal(v reflect.Value) { - // All bytes inside literal return scanContinue op code. - start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. - d.off-- - d.scan.undo(op) - - d.literalStore(d.data[start:d.off], v, false) -} - -// convertNumber converts the number literal s to a float64, int64 or a Number -// depending on d.numberDecodeType. -func (d *decodeState) convertNumber(s string) (interface{}, error) { - switch d.numberType { - - case UnmarshalJSONNumber: - return Number(s), nil - case UnmarshalIntOrFloat: - v, err := strconv.ParseInt(s, 10, 64) - if err == nil { - return v, nil - } - - // tries to parse integer number in scientific notation - f, err := strconv.ParseFloat(s, 64) - if err != nil { - return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} - } - - // if it has no decimal value use int64 - if fi, fd := math.Modf(f); fd == 0.0 { - return int64(fi), nil - } - return f, nil - default: - f, err := strconv.ParseFloat(s, 64) - if err != nil { - return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} - } - return f, nil - } - -} - -var numberType = reflect.TypeOf(Number("")) - -// literalStore decodes a literal stored in item into v. -// -// fromQuoted indicates whether this literal came from unwrapping a -// string from the ",string" struct tag option. this is used only to -// produce more helpful error messages. -func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { - // Check for unmarshaler. - if len(item) == 0 { - //Empty string given - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - return - } - wantptr := item[0] == 'n' // null - u, ut, pv := d.indirect(v, wantptr) - if u != nil { - err := u.UnmarshalJSON(item) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - if item[0] != '"' { - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - } - return - } - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - err := ut.UnmarshalText(s) - if err != nil { - d.error(err) - } - return - } - - v = pv - - switch c := item[0]; c { - case 'n': // null - switch v.Kind() { - case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - v.Set(reflect.Zero(v.Type())) - // otherwise, ignore null for primitives/string - } - case 't', 'f': // true, false - value := c == 't' - switch v.Kind() { - default: - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) - } - case reflect.Bool: - v.SetBool(value) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(value)) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) - } - } - - case '"': // string - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - switch v.Kind() { - default: - d.saveError(&UnmarshalTypeError{"string", v.Type(), 
int64(d.off)}) - case reflect.Slice: - if v.Type().Elem().Kind() != reflect.Uint8 { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - break - } - b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) - n, err := base64.StdEncoding.Decode(b, s) - if err != nil { - d.saveError(err) - break - } - v.SetBytes(b[:n]) - case reflect.String: - v.SetString(string(s)) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(string(s))) - } else { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - } - } - - default: // number - if c != '-' && (c < '0' || c > '9') { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - s := string(item) - switch v.Kind() { - default: - if v.Kind() == reflect.String && v.Type() == numberType { - v.SetString(s) - if !isValidNumber(s) { - d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)) - } - break - } - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) - } - case reflect.Interface: - n, err := d.convertNumber(s) - if err != nil { - d.saveError(err) - break - } - if v.NumMethod() != 0 { - d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) - break - } - v.Set(reflect.ValueOf(n)) - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - n, err := strconv.ParseInt(s, 10, 64) - if err != nil || v.OverflowInt(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetInt(n) - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - n, err := strconv.ParseUint(s, 10, 64) - if err != nil || v.OverflowUint(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetUint(n) - - case reflect.Float32, reflect.Float64: - n, err := strconv.ParseFloat(s, v.Type().Bits()) - if err != nil || v.OverflowFloat(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetFloat(n) - } - } -} - -// The xxxInterface routines build up a value to be stored -// in an empty interface. They are not strictly necessary, -// but they avoid the weight of reflection in this common case. - -// valueInterface is like value but returns interface{} -func (d *decodeState) valueInterface() interface{} { - switch d.scanWhile(scanSkipSpace) { - default: - d.error(errPhase) - panic("unreachable") - case scanBeginArray: - return d.arrayInterface() - case scanBeginObject: - return d.objectInterface() - case scanBeginLiteral: - return d.literalInterface() - } -} - -// arrayInterface is like array but returns []interface{}. -func (d *decodeState) arrayInterface() []interface{} { - var v = make([]interface{}, 0) - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - v = append(v, d.valueInterface()) - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - return v -} - -// objectInterface is like object but returns map[string]interface{}. 
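
The `reflect.Slice` branch of `literalStore` above is why JSON strings decode into `[]byte` fields as standard base64 — behavior the fork shares with stdlib `encoding/json`, as this small stdlib sketch shows:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// A JSON string targeting a []byte field is base64-decoded on the way in.
	var v struct{ Data []byte }
	if err := json.Unmarshal([]byte(`{"Data":"aGVsbG8="}`), &v); err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", v.Data) // hello
}
```
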
-func (d *decodeState) objectInterface() map[string]interface{} { - m := make(map[string]interface{}) - keys := map[string]bool{} - - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read string key. - start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - key, ok := unquote(item) - if !ok { - d.error(errPhase) - } - - // Check for duplicate keys. - _, ok = keys[key] - if !ok { - keys[key] = true - } else { - d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - m[key] = d.valueInterface() - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } - return m -} - -// literalInterface is like literal but returns an interface value. -func (d *decodeState) literalInterface() interface{} { - // All bytes inside literal return scanContinue op code. - start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. - d.off-- - d.scan.undo(op) - item := d.data[start:d.off] - - switch c := item[0]; c { - case 'n': // null - return nil - - case 't', 'f': // true, false - return c == 't' - - case '"': // string - s, ok := unquote(item) - if !ok { - d.error(errPhase) - } - return s - - default: // number - if c != '-' && (c < '0' || c > '9') { - d.error(errPhase) - } - n, err := d.convertNumber(string(item)) - if err != nil { - d.saveError(err) - } - return n - } -} - -// getu4 decodes \uXXXX from the beginning of s, returning the hex value, -// or it returns -1. -func getu4(s []byte) rune { - if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { - return -1 - } - r, err := strconv.ParseUint(string(s[2:6]), 16, 64) - if err != nil { - return -1 - } - return rune(r) -} - -// unquote converts a quoted JSON string literal s into an actual string t. -// The rules are different than for Go, so cannot use strconv.Unquote. -func unquote(s []byte) (t string, ok bool) { - s, ok = unquoteBytes(s) - t = string(s) - return -} - -func unquoteBytes(s []byte) (t []byte, ok bool) { - if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { - return - } - s = s[1 : len(s)-1] - - // Check for unusual characters. If there are none, - // then no unquoting is needed, so return a slice of the - // original bytes. - r := 0 - for r < len(s) { - c := s[r] - if c == '\\' || c == '"' || c < ' ' { - break - } - if c < utf8.RuneSelf { - r++ - continue - } - rr, size := utf8.DecodeRune(s[r:]) - if rr == utf8.RuneError && size == 1 { - break - } - r += size - } - if r == len(s) { - return s, true - } - - b := make([]byte, len(s)+2*utf8.UTFMax) - w := copy(b, s[0:r]) - for r < len(s) { - // Out of room? Can only happen if s is full of - // malformed UTF-8 and we're replacing each - // byte with RuneError. 
- if w >= len(b)-2*utf8.UTFMax { - nb := make([]byte, (len(b)+utf8.UTFMax)*2) - copy(nb, b[0:w]) - b = nb - } - switch c := s[r]; { - case c == '\\': - r++ - if r >= len(s) { - return - } - switch s[r] { - default: - return - case '"', '\\', '/', '\'': - b[w] = s[r] - r++ - w++ - case 'b': - b[w] = '\b' - r++ - w++ - case 'f': - b[w] = '\f' - r++ - w++ - case 'n': - b[w] = '\n' - r++ - w++ - case 'r': - b[w] = '\r' - r++ - w++ - case 't': - b[w] = '\t' - r++ - w++ - case 'u': - r-- - rr := getu4(s[r:]) - if rr < 0 { - return - } - r += 6 - if utf16.IsSurrogate(rr) { - rr1 := getu4(s[r:]) - if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { - // A valid pair; consume. - r += 6 - w += utf8.EncodeRune(b[w:], dec) - break - } - // Invalid surrogate; fall back to replacement rune. - rr = unicode.ReplacementChar - } - w += utf8.EncodeRune(b[w:], rr) - } - - // Quote, control characters are invalid. - case c == '"', c < ' ': - return - - // ASCII - case c < utf8.RuneSelf: - b[w] = c - r++ - w++ - - // Coerce to well-formed UTF-8. - default: - rr, size := utf8.DecodeRune(s[r:]) - r += size - w += utf8.EncodeRune(b[w:], rr) - } - } - return b[0:w], true -} diff --git a/vendor/github.com/go-jose/go-jose/v4/json/encode.go b/vendor/github.com/go-jose/go-jose/v4/json/encode.go deleted file mode 100644 index 98de68ce1e..0000000000 --- a/vendor/github.com/go-jose/go-jose/v4/json/encode.go +++ /dev/null @@ -1,1197 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package json implements encoding and decoding of JSON objects as defined in -// RFC 4627. The mapping between JSON objects and Go values is described -// in the documentation for the Marshal and Unmarshal functions. -// -// See "JSON and Go" for an introduction to this package: -// https://golang.org/doc/articles/json_and_go.html -package json - -import ( - "bytes" - "encoding" - "encoding/base64" - "fmt" - "math" - "reflect" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -// Marshal returns the JSON encoding of v. -// -// Marshal traverses the value v recursively. -// If an encountered value implements the Marshaler interface -// and is not a nil pointer, Marshal calls its MarshalJSON method -// to produce JSON. If no MarshalJSON method is present but the -// value implements encoding.TextMarshaler instead, Marshal calls -// its MarshalText method. -// The nil pointer exception is not strictly necessary -// but mimics a similar, necessary exception in the behavior of -// UnmarshalJSON. -// -// Otherwise, Marshal uses the following type-dependent default encodings: -// -// Boolean values encode as JSON booleans. -// -// Floating point, integer, and Number values encode as JSON numbers. -// -// String values encode as JSON strings coerced to valid UTF-8, -// replacing invalid bytes with the Unicode replacement rune. -// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" -// to keep some browsers from misinterpreting JSON output as HTML. -// Ampersand "&" is also escaped to "\u0026" for the same reason. -// -// Array and slice values encode as JSON arrays, except that -// []byte encodes as a base64-encoded string, and a nil slice -// encodes as the null JSON object. -// -// Struct values encode as JSON objects. 
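The \u-escape handling in unquoteBytes above combines surrogate pairs with utf16.DecodeRune and coerces lone surrogates to the replacement rune rather than rejecting the input. A small sketch of both cases, again using stock encoding/json, which shares this unquoting logic:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// A \uXXXX surrogate pair is combined via utf16.DecodeRune and
	// re-encoded as a single 4-byte UTF-8 rune.
	var s string
	_ = json.Unmarshal([]byte(`"\ud83d\ude00"`), &s)
	fmt.Printf("%q len=%d\n", s, len(s)) // "😀" len=4

	// A lone surrogate is coerced to U+FFFD rather than rejected,
	// matching the "Invalid surrogate" branch above.
	_ = json.Unmarshal([]byte(`"\ud83d!"`), &s)
	fmt.Printf("%q\n", s) // "\ufffd!"
}
```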
Each exported struct field -// becomes a member of the object unless -// - the field's tag is "-", or -// - the field is empty and its tag specifies the "omitempty" option. -// -// The empty values are false, 0, any -// nil pointer or interface value, and any array, slice, map, or string of -// length zero. The object's default key string is the struct field name -// but can be specified in the struct field's tag value. The "json" key in -// the struct field's tag value is the key name, followed by an optional comma -// and options. Examples: -// -// // Field is ignored by this package. -// Field int `json:"-"` -// -// // Field appears in JSON as key "myName". -// Field int `json:"myName"` -// -// // Field appears in JSON as key "myName" and -// // the field is omitted from the object if its value is empty, -// // as defined above. -// Field int `json:"myName,omitempty"` -// -// // Field appears in JSON as key "Field" (the default), but -// // the field is skipped if empty. -// // Note the leading comma. -// Field int `json:",omitempty"` -// -// The "string" option signals that a field is stored as JSON inside a -// JSON-encoded string. It applies only to fields of string, floating point, -// integer, or boolean types. This extra level of encoding is sometimes used -// when communicating with JavaScript programs: -// -// Int64String int64 `json:",string"` -// -// The key name will be used if it's a non-empty string consisting of -// only Unicode letters, digits, dollar signs, percent signs, hyphens, -// underscores and slashes. -// -// Anonymous struct fields are usually marshaled as if their inner exported fields -// were fields in the outer struct, subject to the usual Go visibility rules amended -// as described in the next paragraph. -// An anonymous struct field with a name given in its JSON tag is treated as -// having that name, rather than being anonymous. -// An anonymous struct field of interface type is treated the same as having -// that type as its name, rather than being anonymous. -// -// The Go visibility rules for struct fields are amended for JSON when -// deciding which field to marshal or unmarshal. If there are -// multiple fields at the same level, and that level is the least -// nested (and would therefore be the nesting level selected by the -// usual Go rules), the following extra rules apply: -// -// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, -// even if there are multiple untagged fields that would otherwise conflict. -// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. -// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. -// -// Handling of anonymous struct fields is new in Go 1.1. -// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of -// an anonymous struct field in both current and earlier versions, give the field -// a JSON tag of "-". -// -// Map values encode as JSON objects. -// The map's key type must be string; the map keys are used as JSON object -// keys, subject to the UTF-8 coercion described for string values above. -// -// Pointer values encode as the value pointed to. -// A nil pointer encodes as the null JSON object. -// -// Interface values encode as the value contained in the interface. -// A nil interface value encodes as the null JSON object. -// -// Channel, complex, and function values cannot be encoded in JSON. -// Attempting to encode such a value causes Marshal to return -// an UnsupportedTypeError. 
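Since the tag rules documented above are easy to misread, here is a compact sketch of the main options; it uses stock encoding/json, whose tag handling this deleted copy mirrors, and the Account type is purely illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type Account struct {
	ID     int64  `json:"id,string"`      // encoded inside a JSON string
	Name   string `json:"myName"`         // renamed key
	Note   string `json:"note,omitempty"` // omitted when empty
	Secret string `json:"-"`              // never encoded
}

func main() {
	b, _ := json.Marshal(Account{ID: 42, Name: "alice", Secret: "hunter2"})
	fmt.Println(string(b)) // {"id":"42","myName":"alice"}
}
```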
-// -// JSON cannot represent cyclic data structures and Marshal does not -// handle them. Passing cyclic structures to Marshal will result in -// an infinite recursion. -func Marshal(v interface{}) ([]byte, error) { - e := &encodeState{} - err := e.marshal(v) - if err != nil { - return nil, err - } - return e.Bytes(), nil -} - -// MarshalIndent is like Marshal but applies Indent to format the output. -func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { - b, err := Marshal(v) - if err != nil { - return nil, err - } - var buf bytes.Buffer - err = Indent(&buf, b, prefix, indent) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 -// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 -// so that the JSON will be safe to embed inside HTML <script> tags.
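To make the Marshal/MarshalIndent/HTMLEscape trio above concrete, a small usage sketch against stock encoding/json (same API surface as the deleted file); note that Marshal already HTML-escapes string contents by default:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	// Marshal escapes <, > and & inside strings up front.
	b, _ := json.Marshal(map[string]string{"html": "<b>"})
	fmt.Println(string(b)) // {"html":"\u003cb\u003e"}

	// HTMLEscape rewrites an already-encoded payload the same way.
	var buf bytes.Buffer
	json.HTMLEscape(&buf, []byte(`{"x":"<script>"}`))
	fmt.Println(buf.String()) // {"x":"\u003cscript\u003e"}

	// MarshalIndent is Marshal followed by Indent.
	pretty, _ := json.MarshalIndent(map[string]int{"a": 1}, "", "  ")
	fmt.Println(string(pretty))
}
```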
<th>Total Ingest Rate</th> <th>API Ingest Rate</th> <th>Rule Ingest Rate</th> <th># Queried Ingesters</th>
<td>{{ printf "%.2f" .UserStats.IngestionRate }}</td> <td>{{ printf "%.2f" .UserStats.APIIngestionRate }}</td> <td>{{ printf "%.2f" .UserStats.RuleIngestionRate }}</td> <td>{{ .UserStats.QueriedIngesters }}</td>